package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/discover"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/llama"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, input string) ([]float32, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}

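// A typical caller drives a LlamaServer through this lifecycle (illustrative
// sketch only; names other than the interface methods are hypothetical):
//
//	srv, err := NewLlamaServer(gpus, modelPath, ggml, nil, nil, opts, 1)
//	if err != nil { /* handle */ }
//	defer srv.Close()
//	if err := srv.WaitUntilRunning(ctx); err != nil { /* handle */ }
//	err = srv.Completion(ctx, CompletionRequest{Prompt: "Hi", Options: &opts}, func(r CompletionResponse) {
//		fmt.Print(r.Content)
//	})
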
// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port        int
	cmd         *exec.Cmd
	done        chan error // Channel to signal when the process exits
	status      *StatusWriter
	options     api.Options
	numParallel int
	modelPath   string
	modelLock   sync.Mutex   // Temporary until we switch fully to Go server
	model       *llama.Model // If non-nil, the runner is a new Go server

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount int
	gpus         discover.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration        // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// the maxArraySize is negative, all arrays are collected.
func LoadModel(model string, maxArraySize int) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f, maxArraySize)
	return ggml, err
}

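// For example, a caller that only needs metadata can rely on the default
// array cap (sketch; the path is hypothetical):
//
//	ggml, err := LoadModel("/path/to/model.gguf", 0) // 0 => default cap of 1024
//	if err != nil { /* handle */ }
//	_ = ggml.KV().BlockCount()
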
// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
	var systemTotalMemory uint64
	var systemFreeMemory uint64
	var systemSwapFreeMemory uint64

	systemInfo := discover.GetSystemInfo()
	systemTotalMemory = systemInfo.System.TotalMemory
	systemFreeMemory = systemInfo.System.FreeMemory
	systemSwapFreeMemory = systemInfo.System.FreeSwap
	slog.Info("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = discover.GetCPUInfo()
	}

	estimate := EstimateGPULayers(gpus, ggml, projectors, opts)
	if len(gpus) > 1 || gpus[0].Library != "cpu" {
		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			gpus = discover.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	// On linux and windows, over-allocating CPU memory will almost always result in an error.
	// Darwin has fully dynamic swap, so it has no direct concept of free swap space.
	if runtime.GOOS != "darwin" {
		systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize
		available := systemFreeMemory + systemSwapFreeMemory
		if systemMemoryRequired > available {
			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", format.HumanBytes2(available), "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory))
			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
		}
	}

	estimate.log()

	params := []string{
		"--model", model,
		"--ctx-size", strconv.Itoa(opts.NumCtx),
		"--batch-size", strconv.Itoa(opts.NumBatch),
	}

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", strconv.Itoa(opts.NumGPU))
	}

	if envconfig.Debug() {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", strconv.Itoa(opts.MainGPU))
	}

	if len(adapters) > 0 {
		for _, adapter := range adapters {
			params = append(params, "--lora", adapter)
		}
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	defaultThreads := systemInfo.GetOptimalThreadCount()
	if opts.NumThread > 0 {
		params = append(params, "--threads", strconv.Itoa(opts.NumThread))
	} else if defaultThreads > 0 {
		params = append(params, "--threads", strconv.Itoa(defaultThreads))
	}

	fa := envconfig.FlashAttention()
	if fa && !gpus.FlashAttentionSupported() {
		slog.Warn("flash attention enabled but not supported by gpu")
		fa = false
	}

	if fa && !ggml.SupportsFlashAttention() {
		slog.Warn("flash attention enabled but not supported by model")
		fa = false
	}

	kvct := strings.ToLower(envconfig.KvCacheType())

	if fa {
		slog.Info("enabling flash attention")
		params = append(params, "--flash-attn")

		// Flash Attention also supports kv cache quantization.
		// Enable it if the requested kv cache type is supported by the model.
		if kvct != "" && ggml.SupportsKVCacheType(kvct) {
			params = append(params, "--kv-cache-type", kvct)
		} else {
			slog.Warn("kv cache type not supported by model", "type", kvct)
		}
	} else if kvct != "" && kvct != "f16" {
		slog.Warn("quantized kv cache requested but flash attention disabled", "type", kvct)
	}

	// mmap has issues with partial offloading on metal
	for _, g := range gpus {
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
			opts.UseMMap = new(bool)
			*opts.UseMMap = false
		}
	}

	// Windows CUDA should not use mmap for best performance.
	// On Linux, when the model is larger than free memory, mmap leads to thrashing.
	// For CPU loads we want the memory to be allocated, not FS cache.
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) ||
		(gpus[0].Library == "cpu" && opts.UseMMap == nil) ||
		(opts.UseMMap != nil && !*opts.UseMMap) {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	// TODO - NUMA support currently doesn't work properly

	params = append(params, "--parallel", strconv.Itoa(numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	if envconfig.MultiUserCache() {
		params = append(params, "--multiuser-cache")
	}

	libs := make(map[string]string)
	if entries, err := os.ReadDir(discover.LibOllamaPath); err == nil {
		for _, entry := range entries {
			libs[entry.Name()] = filepath.Join(discover.LibOllamaPath, entry.Name())
		}
	}

	lib := gpus[0].RunnerName()
	requested := envconfig.LLMLibrary()
	if libs[requested] != "" {
		slog.Info("using requested gpu library", "requested", requested)
		lib = requested
	}

	var compatible []string
	for k := range libs {
		// exact match first
		if k == lib {
			compatible = append([]string{k}, compatible...)
			continue
		}

		// then match the family (e.g. 'cuda')
		if strings.Split(k, "_")[0] == strings.Split(lib, "_")[0] {
			compatible = append(compatible, k)
		}
	}
	slog.Debug("compatible gpu libraries", "compatible", compatible)

	// iterate through compatible GPU libraries such as 'cuda_v12', 'cuda_v11', 'rocm', etc.
	// adding each library's respective path to the LD_LIBRARY_PATH, until finally running
	// without any LD_LIBRARY_PATH flags
	for {
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed, using random port")
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}

		finalParams := []string{"runner"}
		finalParams = append(finalParams, params...)
		finalParams = append(finalParams, "--port", strconv.Itoa(port))

		var pathEnv string
		switch runtime.GOOS {
		case "windows":
			pathEnv = "PATH"
		case "darwin":
			pathEnv = "DYLD_LIBRARY_PATH"
		default:
			pathEnv = "LD_LIBRARY_PATH"
		}

		var libraryPaths []string
		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		if len(compatible) > 0 {
			c := compatible[0]
			if libpath, ok := libs[c]; ok {
				slog.Debug("adding gpu library", "path", libpath)
				libraryPaths = append(libraryPaths, libpath)
			}
		}

		// Note: we always put the dependency path first
		// since this was the exact version we compiled/linked against
		if gpus[0].DependencyPath != nil {
			slog.Debug("adding gpu dependency paths", "paths", gpus[0].DependencyPath)
			// assume gpus from the same library have the same dependency path
			libraryPaths = append(gpus[0].DependencyPath, libraryPaths...)
		}

		// finally, add the root library path
		libraryPaths = append(libraryPaths, discover.LibOllamaPath)

		exe, err := os.Executable()
		if err != nil {
			return nil, fmt.Errorf("unable to lookup executable path: %w", err)
		}

		if eval, err := filepath.EvalSymlinks(exe); err == nil {
			exe = eval
		}

		// TODO - once fully switched to the Go runner, load the model here for tokenize/detokenize cgo access
		s := &llmServer{
			port:        port,
			cmd:         exec.Command(exe, finalParams...),
			status:      NewStatusWriter(os.Stderr),
			options:     opts,
			modelPath:   model,
			estimate:    estimate,
			numParallel: numParallel,
			sem:         semaphore.NewWeighted(int64(numParallel)),
			totalLayers: ggml.KV().BlockCount() + 1,
			gpus:        gpus,
			done:        make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status
		s.cmd.SysProcAttr = LlamaServerSysProcAttr

		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug() {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCR_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "GPU_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") ||
					strings.HasPrefix(ev, "DYLD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			var msg string
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err := fmt.Errorf("error starting runner: %v %s", err, msg)
			if len(compatible) == 0 {
				return nil, err
			}

			slog.Warn("unable to start runner with compatible gpu", "error", err, "compatible", compatible)
			compatible = compatible[1:]
			continue
		}

		// reap subprocess when it exits
		go func() {
			err := s.cmd.Wait()
			// Favor a more detailed message over the process exit status
			if err != nil && s.status != nil && s.status.LastErrMsg != "" {
				slog.Error("llama runner terminated", "error", err)
				if strings.Contains(s.status.LastErrMsg, "unknown model") {
					s.status.LastErrMsg = "this model is not supported by your version of Ollama. You may need to upgrade"
				}
				s.done <- errors.New(s.status.LastErrMsg)
			} else {
				s.done <- err
			}
		}()

		return s, nil
	}
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it; log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health response: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("unmarshal health response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := envconfig.LoadTimeout()    // If no progress happens
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			return fmt.Errorf("llama runner process has terminated: %w", err)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(stallDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

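// grammarJSON is a llama.cpp GBNF grammar that constrains sampling to a
// single valid JSON object; Completion applies it when CompletionRequest.Format
// is the literal `"json"`.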
var grammarJSON = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws
object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws
array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws
string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws
number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws
# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data          []byte `json:"data"`
	ID            int    `json:"id"`
	AspectRatioID int    `json:"aspect_ratio_id"`
}

type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  json.RawMessage
	Images  []ImageData
	Options *api.Options
}

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

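// Completion streams generated tokens to fn until the runner reports a stop.
// A minimal caller looks roughly like this (illustrative sketch; srv, prompt,
// and opts are hypothetical):
//
//	req := CompletionRequest{
//		Prompt:  prompt,
//		Format:  json.RawMessage(`"json"`), // optional: constrain output to JSON
//		Options: &opts,
//	}
//	err := srv.Completion(ctx, req, func(r CompletionResponse) {
//		fmt.Print(r.Content)
//		if r.Done {
//			fmt.Println("\ndone:", r.DoneReason, "eval tokens:", r.EvalCount)
//		}
//	})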
func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"min_p":             req.Options.MinP,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	if len(req.Format) > 0 {
		switch string(req.Format) {
		case `null`, `""`:
			// Field was set, but "missing" a value. We accept
			// these as "not set".
			break
		case `"json"`:
			request["grammar"] = grammarJSON
		default:
			if req.Format[0] != '{' {
				return fmt.Errorf("invalid format: %q; expected \"json\" or a valid JSON Schema object", req.Format)
			}

			// User provided a JSON schema
			g := llama.SchemaToGrammar(req.Format)
			if g == nil {
				return fmt.Errorf("invalid JSON schema in format")
			}
			request["grammar"] = string(g)
		}
	}

	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting completion request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return err
	}
	defer s.sem.Release(1)

	// put an upper limit on num_predict to avoid the model running forever
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	// Handle JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated; this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			// slog.Debug("got line", "line", string(line))
			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				evt = line
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}
			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") || strings.Contains(err.Error(), "forcibly closed") {
			s.Close()
			var msg string
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			} else {
				msg = err.Error()
			}
			return fmt.Errorf("an error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float32 `json:"embedding"`
}

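// Embedding returns the model's embedding vector for input. A minimal call
// looks like this (sketch; srv is any LlamaServer):
//
//	vec, err := srv.Embedding(ctx, "the quick brown fox")
//	if err != nil { /* handle */ }
//	fmt.Println(len(vec)) // dimensionality depends on the loaded model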
func (s *llmServer) Embedding(ctx context.Context, input string) ([]float32, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting embedding request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	r, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	r.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(r)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var e EmbeddingResponse
	if err := json.Unmarshal(body, &e); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return e.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

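// Tokenize encodes text to model token ids, preferring the in-process model
// when available and falling back to the runner's HTTP endpoint. Paired with
// Detokenize it forms an approximate round trip (sketch):
//
//	ids, _ := srv.Tokenize(ctx, "hello world")
//	text, _ := srv.Detokenize(ctx, ids)
//	// text == "hello world" for typical vocabularies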
func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	s.modelLock.Lock()
	defer s.modelLock.Unlock()
	if s.model != nil {
		return s.model.Tokenize(content, false, true)
	}

	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound {
		if s.model == nil {
			slog.Debug("new runner detected, loading model for cgo tokenization")
			m, err := llama.LoadModelFromFile(s.modelPath, llama.ModelParams{VocabOnly: true})
			if err != nil {
				return nil, err
			}
			s.model = m
		}
		return s.model.Tokenize(content, false, true)
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	s.modelLock.Lock()
	defer s.modelLock.Unlock()
	if s.model != nil {
		var resp string
		for _, token := range tokens {
			resp += s.model.TokenToPiece(token)
		}
		return resp, nil
	}

	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound {
		if s.model == nil {
			slog.Debug("new runner detected, loading model for cgo detokenization")
			m, err := llama.LoadModelFromFile(s.modelPath, llama.ModelParams{VocabOnly: true})
			if err != nil {
				return "", err
			}
			s.model = m
		}
		var resp string
		for _, token := range tokens {
			resp += s.model.TokenToPiece(token)
		}
		return resp, nil
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

func (s *llmServer) Close() error {
	s.modelLock.Lock()
	if s.model != nil {
		llama.FreeModel(s.model)
		s.model = nil
	}
	s.modelLock.Unlock()

	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}

		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			if i < len(s.estimate.GPUSizes) {
				return s.estimate.GPUSizes[i]
			}
		}
	}
	return 0
}

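// parseDurationMs converts a millisecond count reported by the runner into a
// time.Duration, e.g. parseDurationMs(1234.5) == 1.2345s.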
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}