server.go

package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, prompt string) ([]float64, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}
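
// Illustrative lifecycle sketch (gpus, modelPath, prompt, and handleResp are
// placeholders, not part of this file; callers may differ):
//
//	srv, err := NewLlamaServer(gpus, modelPath, ggml, adapters, projectors, opts)
//	if err == nil {
//		defer srv.Close()
//		if err := srv.WaitUntilRunning(ctx); err == nil {
//			_ = srv.Completion(ctx, CompletionRequest{Prompt: prompt}, handleResp)
//		}
//	}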

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port    int
	cmd     *exec.Cmd
	done    chan error // Channel to signal when the process exits
	status  *StatusWriter
	options api.Options

	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount int
	gpus         gpu.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration   // Record how long it took the model to load
	loadProgress float32

	sem *semaphore.Weighted
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// the maxArraySize is negative, all arrays are collected.
func LoadModel(model string, maxArraySize int) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f, maxArraySize)
	return ggml, err
}
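
// For example (hypothetical path; a maxArraySize of 0 applies the 1024 default):
//
//	ggml, err := LoadModel("/path/to/model.gguf", 0)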

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) {
	var err error
	var cpuRunner string
	var estimate MemoryEstimate
	var systemTotalMemory uint64
	var systemFreeMemory uint64

	systemMemInfo, err := gpu.GetCPUMem()
	if err != nil {
		slog.Error("failed to lookup system memory", "error", err)
	} else {
		systemTotalMemory = systemMemInfo.TotalMemory
		systemFreeMemory = systemMemInfo.FreeMemory
		slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory))
	}

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = gpu.GetCPUInfo()
	}
	if len(gpus) == 1 && gpus[0].Library == "cpu" {
		cpuRunner = serverForCpu()
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)
	} else {
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)

		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			cpuRunner = serverForCpu()
			gpus = gpu.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	estimate.log()

	// Loop through potential servers
	finalErr := errors.New("no suitable llama servers found")

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	availableServers := availableServers()
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant
	}
	demandLib := envconfig.LLMLibrary
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
			if strings.HasPrefix(demandLib, "cpu") {
				// Omit the GPU flag to silence the warning
				opts.NumGPU = -1
			}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--embedding",
	}
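
	// The flags above and below follow the bundled llama.cpp server's CLI. An
	// assembled command line might look like (illustrative values only):
	//
	//	ollama_llama_server --model /models/foo.gguf --ctx-size 2048 --batch-size 512 --embedding --port 54321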

	params = append(params, "--log-disable")

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU))
	}

	if envconfig.Debug {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	flashAttnEnabled := envconfig.FlashAttention

	for _, g := range gpus {
		// only cuda (compute capability 7+) and metal support flash attention
		if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
			flashAttnEnabled = false
		}

		// mmap has issues with partial offloading on metal
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
			opts.UseMMap = api.TriStateFalse
		}
	}

	if flashAttnEnabled {
		params = append(params, "--flash-attn")
	}

	// Windows CUDA should not use mmap for best performance
	// Linux with a model larger than free space, mmap leads to thrashing
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == api.TriStateUndefined) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == api.TriStateUndefined) ||
		opts.UseMMap == api.TriStateFalse {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	numParallel := envconfig.NumParallel

	// TODO (jmorganca): multimodal models don't support parallel yet
	// see https://github.com/ollama/ollama/issues/4165
	if len(projectors) > 0 {
		numParallel = 1
		slog.Warn("multimodal models don't support parallel requests yet")
	}

	params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	for i := range len(servers) {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		if strings.HasPrefix(servers[i], "cpu") {
			gpus = gpu.GetCPUInfo()
		}

		// Find an available port, retrying on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// prepend the server directory to LD_LIBRARY_PATH/PATH and the parent dir for common dependencies
		libraryPaths := []string{dir, filepath.Dir(dir)}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append our runner directory to the path
			// This will favor system libraries over our bundled library dependencies
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we verified for AMD GPUs
		// and we favor what the user had in their path
		if gpus[0].DependencyPath != "" {
			// TODO refine for multi-gpu support
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server += ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}

		s := &llmServer{
			port:        port,
			cmd:         exec.Command(server, finalParams...),
			status:      NewStatusWriter(os.Stderr),
			options:     opts,
			estimate:    estimate,
			sem:         semaphore.NewWeighted(int64(numParallel)),
			totalLayers: ggml.KV().BlockCount() + 1,
			gpus:        gpus,
			done:        make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status

		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			// Detect permission denied and augment the message with a hint about noexec
			if errors.Is(err, os.ErrPermission) {
				finalErr = fmt.Errorf("unable to start server %w. %s may have noexec set. Set OLLAMA_TMPDIR to a writable, executable directory for the server", err, dir)
				continue
			}
  338. msg := ""
  339. if s.status != nil && s.status.LastErrMsg != "" {
  340. msg = s.status.LastErrMsg
  341. }
  342. err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
  343. finalErr = err
  344. continue
  345. }
  346. // reap subprocess when it exits
  347. go func() {
  348. s.done <- s.cmd.Wait()
  349. }()
  350. return s, nil
  351. }
  352. slog.Error("unable to load any llama server", "error", finalErr)
  353. return nil, finalErr
  354. }
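
// projectorMemoryRequirements sums the sizes of all tensor layers in a
// projector file as a rough memory estimate; it returns 0 (rather than an
// error) if the file cannot be opened or decoded.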
func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file, 0)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)
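
// The first three states map to the llama.cpp server's /health statuses
// ("ok", "no slot available", "loading model"; see getServerStatus below).
// The last two are synthesized locally when the process is unreachable or
// reports an error.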

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health response: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("unmarshal health response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}
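
// With 10 retries at a 5ms sleep, getServerStatusRetry gives up after roughly
// 50ms of continuous "no slot available" responses.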

func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := 5 * time.Minute            // If no progress happens
	finalLoadDuration := 5 * time.Minute        // After we hit 100%, give the runner more time to come online
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(finalLoadDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}
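
// jsonGrammar is a GBNF grammar (the llama.cpp grammar format) that restricts
// sampling to syntactically valid JSON. It is sent to the runner as the
// "grammar" request field when CompletionRequest.Format == "json" (see
// Completion below).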
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options api.Options
}

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// only allow maximum 10 "context shifts" to avoid infinite generation
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}
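
	// These keys follow the llama.cpp server's /completion request schema,
	// which is where this payload is POSTed below.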

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
  638. slog.Warn("Prompt does not specify that the LLM should response in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
  732. return fmt.Errorf("an unknown error was encountered while running the model %s", msg)
		}
		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
  781. return nil, fmt.Errorf("unmarshal tokenize response: %w", err)
	}

	return embedding.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
  815. return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
  857. return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
  865. return "", fmt.Errorf("unmarshal encode response: %w", err)
	}

	return decoded.Content, nil
}

func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			return s.estimate.GPUSizes[i]
		}
	}
	return 0
}
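
// parseDurationMs converts a millisecond count from the runner's timings into
// a time.Duration; for example, parseDurationMs(1234.5) yields 1.2345s.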
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}