package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/discover"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/fs/ggml"
	"github.com/ollama/ollama/llama"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, input string) ([]float32, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}
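
// Usage sketch (illustrative, not part of the package API): the expected
// lifecycle of a LlamaServer from a caller's point of view. The gpus, f,
// and opts values are assumed to be prepared by the scheduler elsewhere.
//
//	srv, err := NewLlamaServer(gpus, "/path/to/model.gguf", f, nil, nil, opts, 1)
//	if err != nil {
//		return err
//	}
//	defer srv.Close()
//	if err := srv.WaitUntilRunning(ctx); err != nil {
//		return err
//	}
//	err = srv.Completion(ctx, CompletionRequest{Prompt: "Hello", Options: &opts}, func(r CompletionResponse) {
//		fmt.Print(r.Content)
//	})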

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port        int
	cmd         *exec.Cmd
	done        chan error // Channel to signal when the process exits
	status      *StatusWriter
	options     api.Options
	numParallel int
	modelPath   string
	modelLock   sync.Mutex   // Temporary until we switch fully to Go server
	model       *llama.Model // If non-nil, the runner is a new Go server

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount     int
	gpus         discover.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration        // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// maxArraySize is negative, all arrays are collected.
func LoadModel(model string, maxArraySize int) (*ggml.GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := ggml.Decode(f, maxArraySize)
	return ggml, err
}
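
// A minimal sketch of calling LoadModel directly, assuming a GGUF file on
// disk. A maxArraySize of 0 keeps the default cap of 1024 entries per array;
// -1 would collect every array regardless of size.
//
//	f, err := LoadModel("/path/to/model.gguf", 0)
//	if err != nil {
//		return err
//	}
//	slog.Info("model", "layers", f.KV().BlockCount())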

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus discover.GpuInfoList, model string, f *ggml.GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
	systemInfo := discover.GetSystemInfo()
	systemTotalMemory := systemInfo.System.TotalMemory
	systemFreeMemory := systemInfo.System.FreeMemory
	systemSwapFreeMemory := systemInfo.System.FreeSwap
	slog.Info("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = discover.GetCPUInfo()
	}

	var estimate MemoryEstimate
	if len(gpus) == 1 && gpus[0].Library == "cpu" {
		estimate = EstimateGPULayers(gpus, f, projectors, opts)
	} else {
		estimate = EstimateGPULayers(gpus, f, projectors, opts)
		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			gpus = discover.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	// On Linux and Windows, over-allocating CPU memory will almost always result in an error.
	// Darwin has fully dynamic swap so has no direct concept of free swap space.
	if runtime.GOOS != "darwin" {
		systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize
		available := systemFreeMemory + systemSwapFreeMemory
		if systemMemoryRequired > available {
			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", format.HumanBytes2(available), "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory))
			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
		}
	}
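
	// Worked example with hypothetical sizes: an 8 GiB TotalSize estimate with
	// 5 GiB of VRAMSize leaves 3 GiB for the host. With 2 GiB of free RAM plus
	// 2 GiB of free swap, available is 4 GiB and the load proceeds; with no
	// free swap it fails here instead of thrashing later.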

	slog.Info("offload", "", estimate)

	params := []string{
		"--model", model,
		"--ctx-size", strconv.Itoa(opts.NumCtx),
		"--batch-size", strconv.Itoa(opts.NumBatch),
	}

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", strconv.Itoa(opts.NumGPU))
	}

	if envconfig.Debug() {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", strconv.Itoa(opts.MainGPU))
	}

	if len(adapters) > 0 {
		for _, adapter := range adapters {
			params = append(params, "--lora", adapter)
		}
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	defaultThreads := systemInfo.GetOptimalThreadCount()
	if opts.NumThread > 0 {
		params = append(params, "--threads", strconv.Itoa(opts.NumThread))
	} else if defaultThreads > 0 {
		params = append(params, "--threads", strconv.Itoa(defaultThreads))
	}

	fa := envconfig.FlashAttention()
	if fa && !gpus.FlashAttentionSupported() {
		slog.Warn("flash attention enabled but not supported by gpu")
		fa = false
	}

	if fa && !f.SupportsFlashAttention() {
		slog.Warn("flash attention enabled but not supported by model")
		fa = false
	}

	kvct := strings.ToLower(envconfig.KvCacheType())

	if fa {
		slog.Info("enabling flash attention")
		params = append(params, "--flash-attn")

		// Flash attention also supports KV cache quantization.
		// Enable it if requested and the KV cache type is supported by the model.
		if kvct != "" && f.SupportsKVCacheType(kvct) {
			params = append(params, "--kv-cache-type", kvct)
		} else if kvct != "" {
			slog.Warn("kv cache type not supported by model", "type", kvct)
		}
	} else if kvct != "" && kvct != "f16" {
		slog.Warn("quantized kv cache requested but flash attention disabled", "type", kvct)
	}
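
	// Illustration, assuming the usual envconfig variable names: with
	// OLLAMA_FLASH_ATTENTION=1 and OLLAMA_KV_CACHE_TYPE=q8_0, a GPU and model
	// that both support flash attention yield "--flash-attn --kv-cache-type q8_0";
	// if the model rejects q8_0, only "--flash-attn" is added and a warning is logged.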

	// mmap has issues with partial offloading on metal
	for _, g := range gpus {
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < f.KV().BlockCount()+1 {
			opts.UseMMap = new(bool)
			*opts.UseMMap = false
		}
	}

	// Windows CUDA should not use mmap for best performance.
	// On Linux, if the model is larger than free memory, mmap leads to thrashing.
	// For CPU loads we want the memory to be allocated, not FS cache.
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) ||
		(gpus[0].Library == "cpu" && opts.UseMMap == nil) ||
		(opts.UseMMap != nil && !*opts.UseMMap) {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	// TODO - NUMA support currently doesn't work properly

	params = append(params, "--parallel", strconv.Itoa(numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	if envconfig.MultiUserCache() {
		params = append(params, "--multiuser-cache")
	}

	exe, err := os.Executable()
	if err != nil {
		return nil, err
	}

	// Find an available port, retrying in case the failure was a port-conflict race
	port := 0
	if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
		var l *net.TCPListener
		if l, err = net.ListenTCP("tcp", a); err == nil {
			port = l.Addr().(*net.TCPAddr).Port
			l.Close()
		}
	}
	if port == 0 {
		slog.Debug("ResolveTCPAddr failed", "error", err)
		port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
	}

	finalParams := []string{"runner"}
	if envconfig.NewRunners() {
		finalParams = append(finalParams, "--new-runner")
	}
	finalParams = append(finalParams, params...)
	finalParams = append(finalParams, "--port", strconv.Itoa(port))

	pathEnv := "LD_LIBRARY_PATH"
	if runtime.GOOS == "windows" {
		pathEnv = "PATH"
	}
	// Start with the server directory for the LD_LIBRARY_PATH/PATH
	libraryPaths := []string{filepath.Dir(exe)}

	if libraryPath, ok := os.LookupEnv(pathEnv); ok {
		// favor our bundled library dependencies over system libraries
		libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
	}

	// Note: we always put the dependency path first
	// since this was the exact version we compiled/linked against
	if gpus[0].DependencyPath != nil {
		// assume gpus from the same library have the same dependency path
		libraryPaths = append(gpus[0].DependencyPath, libraryPaths...)
	}

	// TODO - once fully switched to the Go runner, load the model here for tokenize/detokenize cgo access
	s := &llmServer{
		port:        port,
		cmd:         exec.Command(exe, finalParams...),
		status:      NewStatusWriter(os.Stderr),
		options:     opts,
		modelPath:   model,
		estimate:    estimate,
		numParallel: numParallel,
		sem:         semaphore.NewWeighted(int64(numParallel)),
		totalLayers: f.KV().BlockCount() + 1,
		gpus:        gpus,
		done:        make(chan error, 1),
	}

	s.cmd.Env = os.Environ()
	s.cmd.Stdout = os.Stdout
	s.cmd.Stderr = s.status
	s.cmd.SysProcAttr = LlamaServerSysProcAttr

	envWorkarounds := [][2]string{}
	for _, gpu := range gpus {
		envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
	}
	visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
	pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

	// Update or add the path and visible devices variable with our adjusted version
	pathNeeded := true
	devicesNeeded := visibleDevicesEnv != ""
	for i := range s.cmd.Env {
		cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
		if strings.EqualFold(cmp[0], pathEnv) {
			s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
			pathNeeded = false
		} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
			s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
			devicesNeeded = false
		} else if len(envWorkarounds) != 0 {
			for _, kv := range envWorkarounds {
				if strings.EqualFold(cmp[0], kv[0]) {
					s.cmd.Env[i] = kv[0] + "=" + kv[1]
				}
			}
		}
	}
	if pathNeeded {
		s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
	}
	if devicesNeeded {
		s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
	}
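
	// Illustrative result (hypothetical values): on Linux with CUDA GPUs, the
	// loop above rewrites LD_LIBRARY_PATH so the bundled directory comes first
	// (e.g. "/opt/ollama/lib:/usr/lib"), and the visible-devices variable
	// reported by GetVisibleDevicesEnv (e.g. CUDA_VISIBLE_DEVICES) is set or
	// overwritten to the selected GPU IDs.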

	slog.Info("starting llama server", "cmd", s.cmd.String())
	if envconfig.Debug() {
		filteredEnv := []string{}
		for _, ev := range s.cmd.Env {
			if strings.HasPrefix(ev, "CUDA_") ||
				strings.HasPrefix(ev, "ROCR_") ||
				strings.HasPrefix(ev, "ROCM_") ||
				strings.HasPrefix(ev, "HIP_") ||
				strings.HasPrefix(ev, "GPU_") ||
				strings.HasPrefix(ev, "HSA_") ||
				strings.HasPrefix(ev, "GGML_") ||
				strings.HasPrefix(ev, "PATH=") ||
				strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
				filteredEnv = append(filteredEnv, ev)
			}
		}
		// Log at debug as the environment is inherited and might contain sensitive information
		slog.Debug("subprocess", "environment", filteredEnv)
	}

	if err = s.cmd.Start(); err != nil {
		// Detect permission denied and augment the message about noexec
		if errors.Is(err, os.ErrPermission) {
			return nil, fmt.Errorf("unable to start server: %w. %s may have noexec set; set OLLAMA_TMPDIR to a writable, executable directory", err, exe)
		}
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		return nil, fmt.Errorf("error starting the external llama server: %v %s", err, msg)
	}

	// reap subprocess when it exits
	go func() {
		err := s.cmd.Wait()
		// Favor a more detailed message over the process exit status
		if err != nil && s.status != nil && s.status.LastErrMsg != "" {
			slog.Debug("llama runner terminated", "error", err)
			if strings.Contains(s.status.LastErrMsg, "unknown model") {
				s.status.LastErrMsg = "this model is not supported by your version of Ollama. You may need to upgrade"
			}
			s.done <- errors.New(s.status.LastErrMsg)
		} else {
			s.done <- err
		}
	}()

	return s, nil
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health response: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("unmarshal health response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := envconfig.LoadTimeout()    // If no progress happens
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			return fmt.Errorf("llama runner process has terminated: %w", err)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}

		// Use a short per-probe timeout and cancel it immediately after the
		// probe; a deferred cancel here would accumulate until the function returns.
		probeCtx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(probeCtx)
		cancel()

		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(stallDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

var grammarJSON = `
root ::= object
value ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
    string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array ::=
  "[" ws (
    value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`
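
// grammarJSON is a llama.cpp GBNF grammar that constrains sampling to a
// single JSON object. As a sketch of what it admits: `{"name": "Mona"}`
// matches (root derives object), while a bare top-level string such as `"hi"`
// or an array `[1, 2]` is rejected, because root only derives object.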

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data          []byte `json:"data"`
	ID            int    `json:"id"`
	AspectRatioID int    `json:"aspect_ratio_id"`
}

type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  json.RawMessage
	Images  []ImageData
	Options *api.Options
}
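
// Format accepts three shapes, sketched here with illustrative values:
//
//	CompletionRequest{Format: nil}                        // unconstrained output
//	CompletionRequest{Format: json.RawMessage(`"json"`)}  // any JSON object, via grammarJSON
//	CompletionRequest{Format: json.RawMessage(`{"type": "object"}`)} // a JSON Schema, converted to a grammar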

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"min_p":             req.Options.MinP,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	if len(req.Format) > 0 {
		switch string(req.Format) {
		case `null`, `""`:
			// Field was set, but "missing" a value. We accept
			// these as "not set".
			break
		case `"json"`:
			request["grammar"] = grammarJSON
		default:
			if req.Format[0] != '{' {
				return fmt.Errorf("invalid format: %q; expected \"json\" or a valid JSON Schema object", req.Format)
			}

			// User provided a JSON schema
			g := llama.SchemaToGrammar(req.Format)
			if g == nil {
				return fmt.Errorf("invalid JSON schema in format")
			}
			request["grammar"] = string(g)
		}
	}

	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting completion request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return err
	}
	defer s.sem.Release(1)

	// put an upper limit on num_predict to avoid the model running forever
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	// Marshal the JSON with special characters left unescaped
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated; this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			// slog.Debug("got line", "line", string(line))
			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				evt = line
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}
			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") || strings.Contains(err.Error(), "forcibly closed") {
			s.Close()
			var msg string
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			} else {
				msg = err.Error()
			}
			return fmt.Errorf("an error was encountered while running the model: %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}
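
// Caller sketch for Completion (illustrative; assumes srv is a running
// LlamaServer and opts is an api.Options prepared elsewhere — Options must be
// non-nil since the sampling fields are read from it). fn fires once per
// streamed chunk, then a final time with Done set and the timing counters:
//
//	var sb strings.Builder
//	err := srv.Completion(ctx, CompletionRequest{Prompt: "Why is the sky blue?", Options: &opts}, func(r CompletionResponse) {
//		sb.WriteString(r.Content)
//		if r.Done {
//			slog.Info("completion done", "reason", r.DoneReason, "tokens", r.EvalCount, "eval", r.EvalDuration)
//		}
//	})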

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float32 `json:"embedding"`
}

func (s *llmServer) Embedding(ctx context.Context, input string) ([]float32, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		if errors.Is(err, context.Canceled) {
			slog.Info("aborting embedding request due to client closing the connection")
		} else {
			slog.Error("Failed to acquire semaphore", "error", err)
		}
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	r, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	r.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(r)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var e EmbeddingResponse
	if err := json.Unmarshal(body, &e); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return e.Embedding, nil
}
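
// Sketch of a caller computing cosine similarity from two embeddings
// (illustrative only; srv is assumed to be a running LlamaServer):
//
//	a, _ := srv.Embedding(ctx, "cat")
//	b, _ := srv.Embedding(ctx, "kitten")
//	var dot, na, nb float64
//	for i := range a {
//		dot += float64(a[i] * b[i])
//		na += float64(a[i] * a[i])
//		nb += float64(b[i] * b[i])
//	}
//	sim := dot / (math.Sqrt(na) * math.Sqrt(nb))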

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	s.modelLock.Lock()
	defer s.modelLock.Unlock()
	if s.model != nil {
		return s.model.Tokenize(content, false, true)
	}

	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound {
		if s.model == nil {
			slog.Debug("new runner detected, loading model for cgo tokenization")
			m, err := llama.LoadModelFromFile(s.modelPath, llama.ModelParams{VocabOnly: true})
			if err != nil {
				return nil, err
			}
			s.model = m
		}
		return s.model.Tokenize(content, false, true)
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}
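
// Round-trip sketch (illustrative; the token IDs are hypothetical): Tokenize
// and Detokenize are inverses up to whitespace handling, so
//
//	toks, _ := srv.Tokenize(ctx, "Hello world") // e.g. [15339, 1917]
//	text, _ := srv.Detokenize(ctx, toks)        // "Hello world"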

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	s.modelLock.Lock()
	defer s.modelLock.Unlock()
	if s.model != nil {
		var resp string
		for _, token := range tokens {
			resp += s.model.TokenToPiece(token)
		}
		return resp, nil
	}

	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound {
		if s.model == nil {
			slog.Debug("new runner detected, loading model for cgo detokenization")
			m, err := llama.LoadModelFromFile(s.modelPath, llama.ModelParams{VocabOnly: true})
			if err != nil {
				return "", err
			}
			s.model = m
		}
		var resp string
		for _, token := range tokens {
			resp += s.model.TokenToPiece(token)
		}
		return resp, nil
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

func (s *llmServer) Close() error {
	s.modelLock.Lock()
	if s.model != nil {
		llama.FreeModel(s.model)
		s.model = nil
	}
	s.modelLock.Unlock()

	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			if i < len(s.estimate.GPUSizes) {
				return s.estimate.GPUSizes[i]
			}
		}
	}
	return 0
}

func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}
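
// Worked example: parseDurationMs(1234.5) formats the value as "1234.500000ms",
// which time.ParseDuration reads back as 1.2345s. The panic branch is
// unreachable for finite inputs, since "%f" of any finite float yields a
// valid duration literal.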