server.go

package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/discover"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/grammar"
	"github.com/ollama/ollama/llama"
	"github.com/ollama/ollama/runners"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, input string) ([]float32, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}
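
// A typical lifecycle, as a sketch: NewLlamaServer starts the runner
// subprocess, WaitUntilRunning blocks until the model has loaded, and then
// Completion, Embedding, Tokenize, and Detokenize serve requests until
// Close terminates the subprocess.
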
// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port        int
	cmd         *exec.Cmd
	done        chan error // Channel to signal when the process exits
	status      *StatusWriter
	options     api.Options
	numParallel int
	modelPath   string
	modelLock   sync.Mutex   // Temporary until we switch fully to Go server
	model       *llama.Model // If non-nil, the runner is a new Go server

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount     int
	gpus         discover.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration        // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// the maxArraySize is negative, all arrays are collected.
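//
// A minimal usage sketch (the model path here is hypothetical):
//
//	ggml, err := LoadModel("/models/example.gguf", 0) // 0 selects the default maxArraySize of 1024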
func LoadModel(model string, maxArraySize int) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f, maxArraySize)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
	var err error
	var cpuRunner string
	var estimate MemoryEstimate
	var systemTotalMemory uint64
	var systemFreeMemory uint64
	var systemSwapFreeMemory uint64

	systemInfo := discover.GetSystemInfo()
	systemTotalMemory = systemInfo.System.TotalMemory
	systemFreeMemory = systemInfo.System.FreeMemory
	systemSwapFreeMemory = systemInfo.System.FreeSwap
	slog.Info("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = discover.GetCPUInfo()
	}

	if len(gpus) == 1 && gpus[0].Library == "cpu" {
		cpuRunner = runners.ServerForCpu()
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)
	} else {
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)

		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			cpuRunner = runners.ServerForCpu()
			gpus = discover.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	// On linux and windows, over-allocating CPU memory will almost always result in an error
	// Darwin has fully dynamic swap so has no direct concept of free swap space
	if runtime.GOOS != "darwin" {
		systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize
		available := systemFreeMemory + systemSwapFreeMemory
		if systemMemoryRequired > available {
			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", format.HumanBytes2(available), "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory))
			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
		}
	}

	estimate.log()

	// Loop through potential servers
	finalErr := errors.New("no suitable llama servers found")

	availableServers := runners.GetAvailableServers()

	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = runners.ServersForGpu(gpus[0].RunnerName()) // All GPUs in the list are matching Library and Variant
	}
	demandLib := envconfig.LLMLibrary()
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
			if strings.HasPrefix(demandLib, "cpu") || (!(runtime.GOOS == "darwin" && runtime.GOARCH == "arm64") && demandLib == runners.BuiltinName()) {
				// Omit the GPU flag to silence the warning
				opts.NumGPU = -1
			}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}
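
	// Common runner flags; the remaining flags below are appended
	// conditionally from the request options and environment configuration.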
	params := []string{
		"--model", model,
		"--ctx-size", strconv.Itoa(opts.NumCtx),
		"--batch-size", strconv.Itoa(opts.NumBatch),
	}

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", strconv.Itoa(opts.NumGPU))
	}

	if envconfig.Debug() {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", strconv.Itoa(opts.MainGPU))
	}

	if len(adapters) > 0 {
		for _, adapter := range adapters {
			params = append(params, "--lora", adapter)
		}
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	defaultThreads := systemInfo.GetOptimalThreadCount()
	if opts.NumThread > 0 {
		params = append(params, "--threads", strconv.Itoa(opts.NumThread))
	} else if defaultThreads > 0 {
		params = append(params, "--threads", strconv.Itoa(defaultThreads))
	}

	fa := envconfig.FlashAttention()
	if fa && !gpus.FlashAttentionSupported() {
		slog.Warn("flash attention enabled but not supported by gpu")
		fa = false
	}

	if fa && !ggml.SupportsFlashAttention() {
		slog.Warn("flash attention enabled but not supported by model")
		fa = false
	}

	kvct := strings.ToLower(envconfig.KvCacheType())

	if fa {
		slog.Info("enabling flash attention")
		params = append(params, "--flash-attn")

		// Flash Attention also supports kv cache quantization
		// Enable it if the requested kv cache type is supported by the model
		if kvct != "" && ggml.SupportsKVCacheType(kvct) {
			params = append(params, "--kv-cache-type", kvct)
		} else if kvct != "" {
			slog.Warn("kv cache type not supported by model", "type", kvct)
		}
	} else if kvct != "" && kvct != "f16" {
		slog.Warn("quantized kv cache requested but flash attention disabled", "type", kvct)
	}

	// mmap has issues with partial offloading on metal
	for _, g := range gpus {
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
			opts.UseMMap = new(bool)
			*opts.UseMMap = false
		}
	}

	// Windows CUDA should not use mmap for best performance
	// On Linux with a model larger than free space, mmap leads to thrashing
	// For CPU loads we want the memory to be allocated, not FS cache
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) ||
		(gpus[0].Library == "cpu" && opts.UseMMap == nil) ||
		(opts.UseMMap != nil && !*opts.UseMMap) {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	// TODO - NUMA support currently doesn't work properly

	params = append(params, "--parallel", strconv.Itoa(numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	if envconfig.MultiUserCache() {
		params = append(params, "--multiuser-cache")
	}
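
	// Try each candidate runner in order; on a startup failure, record the
	// error in finalErr and fall through to the next candidate.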
	for i := range servers {
		builtin := servers[i] == runners.BuiltinName()
		server := availableServers[servers[i]]
		if server == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		if strings.HasPrefix(servers[i], "cpu") || (builtin && !(runtime.GOOS == "darwin" && runtime.GOARCH == "arm64")) {
			gpus = discover.GetCPUInfo()
		}

		// Find an available port, retry on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := []string{"runner"}
		finalParams = append(finalParams, params...)
		finalParams = append(finalParams, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// Start with the server directory for the LD_LIBRARY_PATH/PATH
		libraryPaths := []string{filepath.Dir(server)}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// favor our bundled library dependencies over system libraries
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we compiled/linked against
		if gpus[0].DependencyPath != nil {
			// assume gpus from the same library have the same dependency path
			libraryPaths = append(gpus[0].DependencyPath, libraryPaths...)
		}

		// TODO - once fully switched to the Go runner, load the model here for tokenize/detokenize cgo access
		s := &llmServer{
			port:        port,
			cmd:         exec.Command(server, finalParams...),
			status:      NewStatusWriter(os.Stderr),
			options:     opts,
			modelPath:   model,
			estimate:    estimate,
			numParallel: numParallel,
			sem:         semaphore.NewWeighted(int64(numParallel)),
			totalLayers: ggml.KV().BlockCount() + 1,
			gpus:        gpus,
			done:        make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status
		s.cmd.SysProcAttr = LlamaServerSysProcAttr

		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug() {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCR_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "GPU_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			// Detect permission denied and augment the message about noexec
			if errors.Is(err, os.ErrPermission) {
				finalErr = fmt.Errorf("unable to start server %w. %s may have noexec set. Set OLLAMA_TMPDIR for server to a writable executable directory", err, server)
				continue
			}
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap subprocess when it exits
		go func() {
			err := s.cmd.Wait()
			// Favor a more detailed message over the process exit status
			if err != nil && s.status != nil && s.status.LastErrMsg != "" {
				slog.Debug("llama runner terminated", "error", err)
				if strings.Contains(s.status.LastErrMsg, "unknown model") {
					s.status.LastErrMsg = "this model is not supported by your version of Ollama. You may need to upgrade"
				}
				s.done <- errors.New(s.status.LastErrMsg)
			} else {
				s.done <- err
			}
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}
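
// getServerStatus polls the runner's /health endpoint and maps the reply to
// a ServerStatus, failing fast if the subprocess has already exited.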
func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health response: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("unmarshal health response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}
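
// Ping reports an error if the runner subprocess is not alive or its health
// endpoint is not responding.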
func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := envconfig.LoadTimeout()    // If no progress happens
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			return fmt.Errorf("llama runner process has terminated: %w", err)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(stallDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}
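
// grammarJSON is a GBNF grammar that constrains sampling to syntactically
// valid JSON; Completion sends it to the runner as the "grammar" field when
// the requested format is "json".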
var grammarJSON = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`
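
// maxBufferSize caps the bufio.Scanner buffer used in Completion to read
// streamed prediction lines from the runner.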
const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data          []byte `json:"data"`
	ID            int    `json:"id"`
	AspectRatioID int    `json:"aspect_ratio_id"`
}

type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  json.RawMessage
	Images  []ImageData
	Options *api.Options
}

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"min_p":             req.Options.MinP,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}
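
	// req.Format may be empty, the literal "json", or a JSON Schema object;
	// the latter two are translated into a grammar below. An illustrative
	// (hypothetical) schema value:
	//
	//	{"type": "object", "properties": {"name": {"type": "string"}}}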
	if len(req.Format) > 0 {
		switch string(req.Format) {
		case `null`, `""`:
			// Field was set, but "missing" a value. We accept
			// these as "not set".
			break
		case `"json"`:
			request["grammar"] = grammarJSON
		default:
			if req.Format[0] != '{' {
				return fmt.Errorf("invalid format: %q; expected \"json\" or a valid JSON Schema object", req.Format)
			}

			// User provided a JSON schema
			g, err := grammar.FromSchema(nil, req.Format)
			if err != nil {
				return fmt.Errorf("invalid JSON schema in format: %w", err)
			}
			request["grammar"] = string(g)
		}
	}

	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting completion request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return err
	}
	defer s.sem.Release(1)

	// put an upper limit on num_predict to avoid the model running on forever
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			// slog.Debug("got line", "line", string(line))
			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				evt = line
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}
			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") || strings.Contains(err.Error(), "forcibly closed") {
			s.Close()
			var msg string
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			} else {
				msg = err.Error()
			}
			return fmt.Errorf("an error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float32 `json:"embedding"`
}

func (s *llmServer) Embedding(ctx context.Context, input string) ([]float32, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting embedding request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	r, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	r.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(r)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var e EmbeddingResponse
	if err := json.Unmarshal(body, &e); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return e.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	s.modelLock.Lock()
	defer s.modelLock.Unlock()
	if s.model != nil {
		return s.model.Tokenize(content, false, true)
	}

	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()
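
	// A 404 means this runner does not expose /tokenize (the new Go runner);
	// fall back to loading the vocabulary via cgo and tokenizing in-process.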
	if resp.StatusCode == http.StatusNotFound {
		if s.model == nil {
			slog.Debug("new runner detected, loading model for cgo tokenization")
			m, err := llama.LoadModelFromFile(s.modelPath, llama.ModelParams{VocabOnly: true})
			if err != nil {
				return nil, err
			}
			s.model = m
		}
		return s.model.Tokenize(content, false, true)
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	s.modelLock.Lock()
	defer s.modelLock.Unlock()
	if s.model != nil {
		var resp string
		for _, token := range tokens {
			resp += s.model.TokenToPiece(token)
		}
		return resp, nil
	}

	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()
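
	// As in Tokenize, a 404 means the runner has no /detokenize endpoint;
	// fall back to cgo and convert each token to its text piece in-process.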
	if resp.StatusCode == http.StatusNotFound {
		if s.model == nil {
			slog.Debug("new runner detected, loading model for cgo detokenization")
			m, err := llama.LoadModelFromFile(s.modelPath, llama.ModelParams{VocabOnly: true})
			if err != nil {
				return "", err
			}
			s.model = m
		}
		var resp string
		for _, token := range tokens {
			resp += s.model.TokenToPiece(token)
		}
		return resp, nil
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

func (s *llmServer) Close() error {
	s.modelLock.Lock()
	if s.model != nil {
		llama.FreeModel(s.model)
		s.model = nil
	}
	s.modelLock.Unlock()

	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			if i < len(s.estimate.GPUSizes) {
				return s.estimate.GPUSizes[i]
			}
		}
	}
	return 0
}
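
// parseDurationMs converts a millisecond count from the runner's timing
// stats into a time.Duration.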
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}