routes.go 5.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254
  1. package server
  2. import (
  3. "embed"
  4. "encoding/json"
  5. "errors"
  6. "fmt"
  7. "io"
  8. "log"
  9. "math"
  10. "net"
  11. "net/http"
  12. "os"
  13. "path"
  14. "runtime"
  15. "strings"
  16. "text/template"
  17. "github.com/gin-gonic/gin"
  18. "github.com/lithammer/fuzzysearch/fuzzy"
  19. "github.com/jmorganca/ollama/api"
  20. "github.com/jmorganca/ollama/llama"
  21. )
// templatesFS embeds every file under templates/ into the binary so the
// server ships with its prompt templates and needs no files on disk.
//go:embed templates/*
var templatesFS embed.FS
// templates parses all embedded *.prompt files into named text templates.
// template.Must panics during package init if any template fails to parse,
// which surfaces broken templates at startup rather than per-request.
var templates = template.Must(template.ParseFS(templatesFS, "templates/*.prompt"))
  25. func cacheDir() string {
  26. home, err := os.UserHomeDir()
  27. if err != nil {
  28. panic(err)
  29. }
  30. return path.Join(home, ".ollama")
  31. }
  32. func generate(c *gin.Context) {
  33. var req api.GenerateRequest
  34. if req.ModelOptions == nil {
  35. req.ModelOptions = &api.DefaultModelOptions
  36. }
  37. if req.PredictOptions == nil {
  38. req.PredictOptions = &api.DefaultPredictOptions
  39. }
  40. if err := c.ShouldBindJSON(&req); err != nil {
  41. c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
  42. return
  43. }
  44. if remoteModel, _ := getRemote(req.Model); remoteModel != nil {
  45. req.Model = remoteModel.FullName()
  46. }
  47. if _, err := os.Stat(req.Model); err != nil {
  48. if !errors.Is(err, os.ErrNotExist) {
  49. c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
  50. return
  51. }
  52. req.Model = path.Join(cacheDir(), "models", req.Model+".bin")
  53. }
  54. modelOpts := getModelOpts(req)
  55. modelOpts.NGPULayers = 1 // hard-code this for now
  56. model, err := llama.New(req.Model, modelOpts)
  57. if err != nil {
  58. fmt.Println("Loading the model failed:", err.Error())
  59. return
  60. }
  61. defer model.Free()
  62. templateNames := make([]string, 0, len(templates.Templates()))
  63. for _, template := range templates.Templates() {
  64. templateNames = append(templateNames, template.Name())
  65. }
  66. match, _ := matchRankOne(path.Base(req.Model), templateNames)
  67. if template := templates.Lookup(match); template != nil {
  68. var sb strings.Builder
  69. if err := template.Execute(&sb, req); err != nil {
  70. fmt.Println("Prompt template failed:", err.Error())
  71. return
  72. }
  73. req.Prompt = sb.String()
  74. }
  75. ch := make(chan string)
  76. model.SetTokenCallback(func(token string) bool {
  77. ch <- token
  78. return true
  79. })
  80. predictOpts := getPredictOpts(req)
  81. go func() {
  82. defer close(ch)
  83. _, err := model.Predict(req.Prompt, predictOpts)
  84. if err != nil {
  85. panic(err)
  86. }
  87. }()
  88. c.Stream(func(w io.Writer) bool {
  89. token, ok := <-ch
  90. if !ok {
  91. return false
  92. }
  93. resp := api.GenerateResponse{
  94. Response: token,
  95. }
  96. bts, err := json.Marshal(resp)
  97. if err != nil {
  98. return false
  99. }
  100. bts = append(bts, '\n')
  101. if _, err := w.Write(bts); err != nil {
  102. return false
  103. }
  104. return true
  105. })
  106. }
  107. func Serve(ln net.Listener) error {
  108. r := gin.Default()
  109. r.GET("/", func(c *gin.Context) {
  110. c.String(http.StatusOK, "Ollama is running")
  111. })
  112. r.POST("api/pull", func(c *gin.Context) {
  113. var req api.PullRequest
  114. if err := c.ShouldBindJSON(&req); err != nil {
  115. c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
  116. return
  117. }
  118. progressCh := make(chan api.PullProgress)
  119. go func() {
  120. defer close(progressCh)
  121. if err := pull(req.Model, progressCh); err != nil {
  122. var opError *net.OpError
  123. if errors.As(err, &opError) {
  124. result := api.PullProgress{
  125. Error: api.Error{
  126. Code: http.StatusBadGateway,
  127. Message: "failed to get models from directory",
  128. },
  129. }
  130. c.JSON(http.StatusBadGateway, result)
  131. return
  132. }
  133. c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
  134. return
  135. }
  136. }()
  137. c.Stream(func(w io.Writer) bool {
  138. progress, ok := <-progressCh
  139. if !ok {
  140. return false
  141. }
  142. bts, err := json.Marshal(progress)
  143. if err != nil {
  144. return false
  145. }
  146. bts = append(bts, '\n')
  147. if _, err := w.Write(bts); err != nil {
  148. return false
  149. }
  150. return true
  151. })
  152. })
  153. r.POST("/api/generate", generate)
  154. log.Printf("Listening on %s", ln.Addr())
  155. s := &http.Server{
  156. Handler: r,
  157. }
  158. return s.Serve(ln)
  159. }
  160. func matchRankOne(source string, targets []string) (bestMatch string, bestRank int) {
  161. bestRank = math.MaxInt
  162. for _, target := range targets {
  163. if rank := fuzzy.LevenshteinDistance(source, target); bestRank > rank {
  164. bestRank = rank
  165. bestMatch = target
  166. }
  167. }
  168. return
  169. }
  170. func getModelOpts(req api.GenerateRequest) llama.ModelOptions {
  171. var opts llama.ModelOptions
  172. opts.ContextSize = req.ModelOptions.ContextSize
  173. opts.Seed = req.ModelOptions.Seed
  174. opts.F16Memory = req.ModelOptions.F16Memory
  175. opts.MLock = req.ModelOptions.MLock
  176. opts.Embeddings = req.ModelOptions.Embeddings
  177. opts.MMap = req.ModelOptions.MMap
  178. opts.LowVRAM = req.ModelOptions.LowVRAM
  179. opts.NBatch = req.ModelOptions.NBatch
  180. opts.VocabOnly = req.ModelOptions.VocabOnly
  181. opts.NUMA = req.ModelOptions.NUMA
  182. opts.NGPULayers = req.ModelOptions.NGPULayers
  183. opts.MainGPU = req.ModelOptions.MainGPU
  184. opts.TensorSplit = req.ModelOptions.TensorSplit
  185. return opts
  186. }
  187. func getPredictOpts(req api.GenerateRequest) llama.PredictOptions {
  188. var opts llama.PredictOptions
  189. if req.PredictOptions.Threads == -1 {
  190. opts.Threads = runtime.NumCPU()
  191. } else {
  192. opts.Threads = req.PredictOptions.Threads
  193. }
  194. opts.Seed = req.PredictOptions.Seed
  195. opts.Tokens = req.PredictOptions.Tokens
  196. opts.Penalty = req.PredictOptions.Penalty
  197. opts.Repeat = req.PredictOptions.Repeat
  198. opts.Batch = req.PredictOptions.Batch
  199. opts.NKeep = req.PredictOptions.NKeep
  200. opts.TopK = req.PredictOptions.TopK
  201. opts.TopP = req.PredictOptions.TopP
  202. opts.TailFreeSamplingZ = req.PredictOptions.TailFreeSamplingZ
  203. opts.TypicalP = req.PredictOptions.TypicalP
  204. opts.Temperature = req.PredictOptions.Temperature
  205. opts.FrequencyPenalty = req.PredictOptions.FrequencyPenalty
  206. opts.PresencePenalty = req.PredictOptions.PresencePenalty
  207. opts.Mirostat = req.PredictOptions.Mirostat
  208. opts.MirostatTAU = req.PredictOptions.MirostatTAU
  209. opts.MirostatETA = req.PredictOptions.MirostatETA
  210. opts.MMap = req.PredictOptions.MMap
  211. return opts
  212. }