package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/discover"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/llama"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, input string) ([]float32, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}

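// exampleServerLifecycle is an illustrative sketch only and is not called by
// the package itself: it shows how a caller might drive a LlamaServer once it
// has been created with NewLlamaServer. The prompt text is an assumption made
// for the example.
func exampleServerLifecycle(ctx context.Context, s LlamaServer, opts api.Options) error {
	// Block until the runner subprocess reports it is ready.
	if err := s.WaitUntilRunning(ctx); err != nil {
		return err
	}
	defer s.Close()

	// Stream generated tokens to stdout as they arrive.
	return s.Completion(ctx, CompletionRequest{
		Prompt:  "Why is the sky blue?",
		Options: &opts,
	}, func(r CompletionResponse) {
		fmt.Print(r.Content)
	})
}
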
// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port        int
	cmd         *exec.Cmd
	done        chan error // Channel to signal when the process exits
	status      *StatusWriter
	options     api.Options
	numParallel int
	modelPath   string
	modelLock   sync.Mutex   // Temporary until we switch fully to Go server
	model       *llama.Model // If non-nil, the runner is a new Go server

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount int
	gpus         discover.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration        // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// the maxArraySize is negative, all arrays are collected.
func LoadModel(model string, maxArraySize int) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f, maxArraySize)
	return ggml, err
}

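// exampleLoadModel is an illustrative sketch only and is not called anywhere
// in the package: it loads a model's GGML metadata with the default
// array-size cap and logs the layer count. The model path is a placeholder
// assumption.
func exampleLoadModel() error {
	ggml, err := LoadModel("/tmp/model.gguf", 0)
	if err != nil {
		return err
	}
	slog.Info("model metadata loaded", "layers", ggml.KV().BlockCount()+1)
	return nil
}
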
// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
	var systemTotalMemory uint64
	var systemFreeMemory uint64
	var systemSwapFreeMemory uint64

	systemInfo := discover.GetSystemInfo()
	systemTotalMemory = systemInfo.System.TotalMemory
	systemFreeMemory = systemInfo.System.FreeMemory
	systemSwapFreeMemory = systemInfo.System.FreeSwap
	slog.Info("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = discover.GetCPUInfo()
	}

	estimate := EstimateGPULayers(gpus, ggml, projectors, opts)
	if len(gpus) > 1 || gpus[0].Library != "cpu" {
		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			gpus = discover.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	// On linux and windows, over-allocating CPU memory will almost always result in an error
	// Darwin has fully dynamic swap so has no direct concept of free swap space
	if runtime.GOOS != "darwin" {
		systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize
		available := systemFreeMemory + systemSwapFreeMemory
		if systemMemoryRequired > available {
			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", format.HumanBytes2(available), "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory))
			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
		}
	}

	estimate.log()

	params := []string{
		"--model", model,
		"--ctx-size", strconv.Itoa(opts.NumCtx),
		"--batch-size", strconv.Itoa(opts.NumBatch),
	}

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", strconv.Itoa(opts.NumGPU))
	}

	if envconfig.Debug() {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", strconv.Itoa(opts.MainGPU))
	}

	if len(adapters) > 0 {
		for _, adapter := range adapters {
			params = append(params, "--lora", adapter)
		}
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	defaultThreads := systemInfo.GetOptimalThreadCount()
	if opts.NumThread > 0 {
		params = append(params, "--threads", strconv.Itoa(opts.NumThread))
	} else if defaultThreads > 0 {
		params = append(params, "--threads", strconv.Itoa(defaultThreads))
	}

	fa := envconfig.FlashAttention()
	if fa && !gpus.FlashAttentionSupported() {
		slog.Warn("flash attention enabled but not supported by gpu")
		fa = false
	}

	if fa && !ggml.SupportsFlashAttention() {
		slog.Warn("flash attention enabled but not supported by model")
		fa = false
	}

	kvct := strings.ToLower(envconfig.KvCacheType())

	if fa {
		slog.Info("enabling flash attention")
		params = append(params, "--flash-attn")

		// Flash Attention also supports kv cache quantization
		// Enable it if the requested kv cache type is supported by the model
		if kvct != "" && ggml.SupportsKVCacheType(kvct) {
			params = append(params, "--kv-cache-type", kvct)
		} else {
			slog.Warn("kv cache type not supported by model", "type", kvct)
		}
	} else if kvct != "" && kvct != "f16" {
		slog.Warn("quantized kv cache requested but flash attention disabled", "type", kvct)
	}

	// mmap has issues with partial offloading on metal
	for _, g := range gpus {
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
			opts.UseMMap = new(bool)
			*opts.UseMMap = false
		}
	}

	// Windows CUDA should not use mmap for best performance
	// On Linux with a model larger than free space, mmap leads to thrashing
	// For CPU loads we want the memory to be allocated, not FS cache
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) ||
		(gpus[0].Library == "cpu" && opts.UseMMap == nil) ||
		(opts.UseMMap != nil && !*opts.UseMMap) {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	// TODO - NUMA support currently doesn't work properly
	params = append(params, "--parallel", strconv.Itoa(numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	if envconfig.MultiUserCache() {
		params = append(params, "--multiuser-cache")
	}

	libs := make(map[string]string)
	if entries, err := os.ReadDir(discover.LibOllamaPath); err == nil {
		for _, entry := range entries {
			libs[entry.Name()] = filepath.Join(discover.LibOllamaPath, entry.Name())
		}
	}

	lib := gpus[0].RunnerName()
	requested := envconfig.LLMLibrary()
	if libs[requested] != "" {
		slog.Info("using requested gpu library", "requested", requested)
		lib = requested
	}

	var compatible []string
	for k := range libs {
		// exact match first
		if k == lib {
			compatible = append([]string{k}, compatible...)
			continue
		}

		// then match the family (e.g. 'cuda')
		if strings.Split(k, "_")[0] == strings.Split(lib, "_")[0] {
			compatible = append(compatible, k)
		}
	}
	slog.Debug("compatible gpu libraries", "compatible", compatible)

	// iterate through compatible GPU libraries such as 'cuda_v12', 'cuda_v11', 'rocm', etc.
	// adding each library's respective path to the LD_LIBRARY_PATH, until finally running
	// without any LD_LIBRARY_PATH flags
	for {
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed, using random port")
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}

		finalParams := []string{"runner"}
		finalParams = append(finalParams, params...)
		finalParams = append(finalParams, "--port", strconv.Itoa(port))

		var pathEnv string
		switch runtime.GOOS {
		case "windows":
			pathEnv = "PATH"
		case "darwin":
			pathEnv = "DYLD_LIBRARY_PATH"
		default:
			pathEnv = "LD_LIBRARY_PATH"
		}

		var libraryPaths []string
		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		if len(compatible) > 0 {
			c := compatible[0]
			if libpath, ok := libs[c]; ok {
				slog.Debug("adding gpu library", "path", libpath)
				libraryPaths = append(libraryPaths, libpath)
			}
		}

		// Note: we always put the dependency path first
		// since this was the exact version we compiled/linked against
		if gpus[0].DependencyPath != nil {
			slog.Debug("adding gpu dependency paths", "paths", gpus[0].DependencyPath)
			// assume gpus from the same library have the same dependency path
			libraryPaths = append(gpus[0].DependencyPath, libraryPaths...)
		}

		// finally, add the root library path
		libraryPaths = append(libraryPaths, discover.LibOllamaPath)

		exe, err := os.Executable()
		if err != nil {
			return nil, fmt.Errorf("unable to lookup executable path: %w", err)
		}

		exe, err = filepath.EvalSymlinks(exe)
		if err != nil {
			return nil, fmt.Errorf("unable to evaluate symlinks for executable path: %w", err)
		}

		// TODO - once fully switched to the Go runner, load the model here for tokenize/detokenize cgo access
		s := &llmServer{
			port:        port,
			cmd:         exec.Command(exe, finalParams...),
			status:      NewStatusWriter(os.Stderr),
			options:     opts,
			modelPath:   model,
			estimate:    estimate,
			numParallel: numParallel,
			sem:         semaphore.NewWeighted(int64(numParallel)),
			totalLayers: ggml.KV().BlockCount() + 1,
			gpus:        gpus,
			done:        make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status
		s.cmd.SysProcAttr = LlamaServerSysProcAttr

		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug() {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCR_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "GPU_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") ||
					strings.HasPrefix(ev, "DYLD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			var msg string
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err := fmt.Errorf("error starting runner: %v %s", err, msg)
			if len(compatible) == 0 {
				return nil, err
			}

			slog.Warn("unable to start runner with compatible gpu", "error", err, "compatible", compatible)
			compatible = compatible[1:]
			continue
		}

		// reap subprocess when it exits
		go func() {
			err := s.cmd.Wait()
			// Favor a more detailed message over the process exit status
			if err != nil && s.status != nil && s.status.LastErrMsg != "" {
				slog.Error("llama runner terminated", "error", err)
				if strings.Contains(s.status.LastErrMsg, "unknown model") {
					s.status.LastErrMsg = "this model is not supported by your version of Ollama. You may need to upgrade"
				}
				s.done <- errors.New(s.status.LastErrMsg)
			} else {
				s.done <- err
			}
		}()

		return s, nil
	}
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("health unmarshal response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := envconfig.LoadTimeout()    // If no progress happens
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			return fmt.Errorf("llama runner process has terminated: %w", err)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(stallDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

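// grammarJSON is the GBNF grammar sent to the runner as the "grammar"
// parameter when the request format is "json"; it constrains sampling to
// syntactically valid JSON output.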
var grammarJSON = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws
object ::=
  "{" ws (
    string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws
array  ::=
  "[" ws (
    value
    ("," ws value)*
  )? "]" ws
string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws
number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws
# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data          []byte `json:"data"`
	ID            int    `json:"id"`
	AspectRatioID int    `json:"aspect_ratio_id"`
}

// TokenProbs represents probability information for a token
type TokenProbs struct {
	TokenID int     `json:"id"`
	Logit   float32 `json:"logit"`
	Prob    float32 `json:"prob"`
	LogProb float32 `json:"logprob"`
	Token   string  `json:"token"`
}

type completion struct {
	Content      string       `json:"content"`
	Model        string       `json:"model"`
	Prompt       string       `json:"prompt"`
	Stop         bool         `json:"stop"`
	StoppedLimit bool         `json:"stopped_limit"`
	LogProbs     []TokenProbs `json:"logprobs"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt   string
	Format   json.RawMessage
	Images   []ImageData
	LogProbs int

	Options *api.Options
}

type CompletionResponse struct {
	Content            string
	LogProbs           []TokenProbs
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

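// collectCompletion is an illustrative sketch only and is not part of the
// package's API surface: it shows how the streaming callback can be used to
// gather the full response text and the final eval count. The helper name is
// an assumption made for the example.
func collectCompletion(ctx context.Context, s LlamaServer, req CompletionRequest) (string, error) {
	var sb strings.Builder
	var evalCount int
	err := s.Completion(ctx, req, func(r CompletionResponse) {
		// Partial responses carry Content; the final response carries Done
		// plus the timing counters.
		sb.WriteString(r.Content)
		if r.Done {
			evalCount = r.EvalCount
		}
	})
	if err != nil {
		return "", err
	}
	slog.Debug("completion finished", "eval_count", evalCount)
	return sb.String(), nil
}
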
func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"min_p":             req.Options.MinP,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"logprobs":          req.LogProbs,
		"cache_prompt":      true,
	}

	// Log at debug as the request contains the full prompt
	slog.Debug("completion request", "request", request)

	if len(req.Format) > 0 {
		switch string(req.Format) {
		case `null`, `""`:
			// Field was set, but "missing" a value. We accept
			// these as "not set".
			break
		case `"json"`:
			request["grammar"] = grammarJSON
		default:
			if req.Format[0] != '{' {
				return fmt.Errorf("invalid format: %q; expected \"json\" or a valid JSON Schema object", req.Format)
			}

			// User provided a JSON schema
			g := llama.SchemaToGrammar(req.Format)
			if g == nil {
				return fmt.Errorf("invalid JSON schema in format")
			}
			request["grammar"] = string(g)
		}
	}

	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting completion request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return err
	}
	defer s.sem.Release(1)

	// put an upper limit on num_predict to avoid the model running on forever
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				evt = line
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content:  c.Content,
					LogProbs: c.LogProbs,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
					LogProbs:           c.LogProbs,
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") || strings.Contains(err.Error(), "forcibly closed") {
			s.Close()
			var msg string
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			} else {
				msg = err.Error()
			}
			return fmt.Errorf("an error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float32 `json:"embedding"`
}

func (s *llmServer) Embedding(ctx context.Context, input string) ([]float32, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting embedding request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	r, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	r.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(r)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var e EmbeddingResponse
	if err := json.Unmarshal(body, &e); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return e.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	s.modelLock.Lock()
	defer s.modelLock.Unlock()
	if s.model != nil {
		return s.model.Tokenize(content, false, true)
	}

	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound {
		if s.model == nil {
			slog.Debug("new runner detected, loading model for cgo tokenization")
			m, err := llama.LoadModelFromFile(s.modelPath, llama.ModelParams{VocabOnly: true})
			if err != nil {
				return nil, err
			}
			s.model = m
		}
		return s.model.Tokenize(content, false, true)
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	s.modelLock.Lock()
	defer s.modelLock.Unlock()
	if s.model != nil {
		var resp string
		for _, token := range tokens {
			resp += s.model.TokenToPiece(token)
		}
		return resp, nil
	}

	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound {
		if s.model == nil {
			slog.Debug("new runner detected, loading model for cgo detokenization")
			m, err := llama.LoadModelFromFile(s.modelPath, llama.ModelParams{VocabOnly: true})
			if err != nil {
				return "", err
			}
			s.model = m
		}
		var resp string
		for _, token := range tokens {
			resp += s.model.TokenToPiece(token)
		}
		return resp, nil
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

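// exampleTokenizeRoundTrip is an illustrative sketch only and is not called
// by the package: it tokenizes a string and detokenizes the result, which
// should reproduce the original text for well-behaved vocabularies. The input
// string is an assumption made for the example.
func exampleTokenizeRoundTrip(ctx context.Context, s LlamaServer) error {
	tokens, err := s.Tokenize(ctx, "hello world")
	if err != nil {
		return err
	}
	text, err := s.Detokenize(ctx, tokens)
	if err != nil {
		return err
	}
	slog.Debug("tokenize round trip", "tokens", len(tokens), "text", text)
	return nil
}
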
func (s *llmServer) Close() error {
	s.modelLock.Lock()
	if s.model != nil {
		llama.FreeModel(s.model)
		s.model = nil
	}
	s.modelLock.Unlock()

	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}

		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			if i < len(s.estimate.GPUSizes) {
				return s.estimate.GPUSizes[i]
			}
		}
	}
	return 0
}

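// parseDurationMs converts a millisecond count reported by the runner into a
// time.Duration; for example, 1234.5 becomes 1.2345s.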
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}