openai.go

// Package openai provides middleware for partial compatibility with the OpenAI REST API.
package openai

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"math/rand"
	"net/http"
	"time"

	"github.com/gin-gonic/gin"

	"github.com/ollama/ollama/api"
)

type Error struct {
	Message string      `json:"message"`
	Type    string      `json:"type"`
	Param   interface{} `json:"param"`
	Code    *string     `json:"code"`
}

type ErrorResponse struct {
	Error Error `json:"error"`
}

type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

type Choice struct {
	Index        int     `json:"index"`
	Message      Message `json:"message"`
	FinishReason *string `json:"finish_reason"`
}

type ChunkChoice struct {
	Index        int     `json:"index"`
	Delta        Message `json:"delta"`
	FinishReason *string `json:"finish_reason"`
}

type Usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

type ResponseFormat struct {
	Type string `json:"type"`
}

type ChatCompletionRequest struct {
	Model            string          `json:"model"`
	Messages         []Message       `json:"messages"`
	Stream           bool            `json:"stream"`
	MaxTokens        *int            `json:"max_tokens"`
	Seed             *int            `json:"seed"`
	Stop             any             `json:"stop"`
	Temperature      *float64        `json:"temperature"`
	FrequencyPenalty *float64        `json:"frequency_penalty"`
	PresencePenalty  *float64        `json:"presence_penalty"`
	TopP             *float64        `json:"top_p"`
	ResponseFormat   *ResponseFormat `json:"response_format"`
}

type ChatCompletion struct {
	Id                string   `json:"id"`
	Object            string   `json:"object"`
	Created           int64    `json:"created"`
	Model             string   `json:"model"`
	SystemFingerprint string   `json:"system_fingerprint"`
	Choices           []Choice `json:"choices"`
	Usage             Usage    `json:"usage,omitempty"`
}

type ChatCompletionChunk struct {
	Id                string        `json:"id"`
	Object            string        `json:"object"`
	Created           int64         `json:"created"`
	Model             string        `json:"model"`
	SystemFingerprint string        `json:"system_fingerprint"`
	Choices           []ChunkChoice `json:"choices"`
}

// NewError wraps a message in the OpenAI error envelope, picking the error
// type from the HTTP status code.
func NewError(code int, message string) ErrorResponse {
	var etype string
	switch code {
	case http.StatusBadRequest:
		etype = "invalid_request_error"
	case http.StatusNotFound:
		etype = "not_found_error"
	default:
		etype = "api_error"
	}

	return ErrorResponse{Error{Type: etype, Message: message}}
}

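// As an illustration (not exercised anywhere in this file),
// NewError(http.StatusBadRequest, "model is required") encodes as
//
//	{"error":{"message":"model is required","type":"invalid_request_error","param":null,"code":null}}
//
// which is the error envelope OpenAI clients expect on failure.
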
func toChatCompletion(id string, r api.ChatResponse) ChatCompletion {
	return ChatCompletion{
		Id:                id,
		Object:            "chat.completion",
		Created:           r.CreatedAt.Unix(),
		Model:             r.Model,
		SystemFingerprint: "fp_ollama",
		Choices: []Choice{{
			Index:   0,
			Message: Message{Role: r.Message.Role, Content: r.Message.Content},
			FinishReason: func(reason string) *string {
				if len(reason) > 0 {
					return &reason
				}
				return nil
			}(r.DoneReason),
		}},
		Usage: Usage{
			// TODO: ollama returns 0 for prompt eval if the prompt was cached, but openai returns the actual count
			PromptTokens:     r.PromptEvalCount,
			CompletionTokens: r.EvalCount,
			TotalTokens:      r.PromptEvalCount + r.EvalCount,
		},
	}
}

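// For reference, a finished response rendered by toChatCompletion looks
// like this (values illustrative, whitespace added):
//
//	{"id":"chatcmpl-42","object":"chat.completion","created":1700000000,
//	 "model":"llama2","system_fingerprint":"fp_ollama",
//	 "choices":[{"index":0,"message":{"role":"assistant","content":"Hi!"},"finish_reason":"stop"}],
//	 "usage":{"prompt_tokens":26,"completion_tokens":8,"total_tokens":34}}
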
func toChunk(id string, r api.ChatResponse) ChatCompletionChunk {
	return ChatCompletionChunk{
		Id:     id,
		Object: "chat.completion.chunk",
		// unlike toChatCompletion, chunks are stamped with the time they
		// are written rather than the response's CreatedAt
		Created:           time.Now().Unix(),
		Model:             r.Model,
		SystemFingerprint: "fp_ollama",
		Choices: []ChunkChoice{{
			Index: 0,
			Delta: Message{Role: "assistant", Content: r.Message.Content},
			FinishReason: func(reason string) *string {
				if len(reason) > 0 {
					return &reason
				}
				return nil
			}(r.DoneReason),
		}},
	}
}

// fromRequest maps an OpenAI-style chat completion request onto a native
// Ollama chat request.
func fromRequest(r ChatCompletionRequest) api.ChatRequest {
	var messages []api.Message
	for _, msg := range r.Messages {
		messages = append(messages, api.Message{Role: msg.Role, Content: msg.Content})
	}

	options := make(map[string]interface{})

	switch stop := r.Stop.(type) {
	case string:
		options["stop"] = []string{stop}
	case []interface{}:
		var stops []string
		for _, s := range stop {
			if str, ok := s.(string); ok {
				stops = append(stops, str)
			}
		}
		options["stop"] = stops
	}

	if r.MaxTokens != nil {
		options["num_predict"] = *r.MaxTokens
	}

	if r.Temperature != nil {
		options["temperature"] = *r.Temperature * 2.0
	} else {
		options["temperature"] = 1.0
	}

	if r.Seed != nil {
		options["seed"] = *r.Seed
		// temperature=0 is required for reproducible outputs
		options["temperature"] = 0.0
	}

	if r.FrequencyPenalty != nil {
		options["frequency_penalty"] = *r.FrequencyPenalty * 2.0
	}

	if r.PresencePenalty != nil {
		options["presence_penalty"] = *r.PresencePenalty * 2.0
	}

	if r.TopP != nil {
		options["top_p"] = *r.TopP
	} else {
		options["top_p"] = 1.0
	}

	var format string
	if r.ResponseFormat != nil && r.ResponseFormat.Type == "json_object" {
		format = "json"
	}

	return api.ChatRequest{
		Model:    r.Model,
		Messages: messages,
		Format:   format,
		Options:  options,
		Stream:   &r.Stream,
	}
}

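// To make the mapping concrete (values illustrative): a request body of
//
//	{"model":"llama2","messages":[...],"temperature":0.7,"stop":"\n"}
//
// produces options of {"temperature":1.4,"stop":["\n"],"top_p":1.0}.
// Temperature and both penalties are doubled on the way through, a bare
// string stop is wrapped in a slice, and supplying a seed pins the
// temperature to 0 for reproducibility.
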
// writer wraps gin's ResponseWriter so that native Ollama responses can be
// rewritten into OpenAI-shaped payloads as they are written.
type writer struct {
	stream bool
	id     string
	gin.ResponseWriter
}

func (w *writer) writeError(code int, data []byte) (int, error) {
	var serr api.StatusError
	err := json.Unmarshal(data, &serr)
	if err != nil {
		return 0, err
	}

	w.ResponseWriter.Header().Set("Content-Type", "application/json")
	err = json.NewEncoder(w.ResponseWriter).Encode(NewError(code, serr.Error()))
	if err != nil {
		return 0, err
	}

	return len(data), nil
}

func (w *writer) writeResponse(data []byte) (int, error) {
	var chatResponse api.ChatResponse
	err := json.Unmarshal(data, &chatResponse)
	if err != nil {
		return 0, err
	}

	// chat chunk
	if w.stream {
		d, err := json.Marshal(toChunk(w.id, chatResponse))
		if err != nil {
			return 0, err
		}

		w.ResponseWriter.Header().Set("Content-Type", "text/event-stream")
		_, err = w.ResponseWriter.Write([]byte(fmt.Sprintf("data: %s\n\n", d)))
		if err != nil {
			return 0, err
		}

		if chatResponse.Done {
			_, err = w.ResponseWriter.Write([]byte("data: [DONE]\n\n"))
			if err != nil {
				return 0, err
			}
		}

		return len(data), nil
	}

	// chat completion
	w.ResponseWriter.Header().Set("Content-Type", "application/json")
	err = json.NewEncoder(w.ResponseWriter).Encode(toChatCompletion(w.id, chatResponse))
	if err != nil {
		return 0, err
	}

	return len(data), nil
}

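// On the wire, a streamed exchange is plain server-sent events: one
// "data:" line per Ollama response, then a sentinel once Done is set
// (chunk payload abridged here):
//
//	data: {"id":"chatcmpl-42","object":"chat.completion.chunk",...}
//
//	data: [DONE]
//
// The [DONE] sentinel is what stock OpenAI SDKs watch for to stop reading.
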
func (w *writer) Write(data []byte) (int, error) {
	code := w.ResponseWriter.Status()
	if code != http.StatusOK {
		return w.writeError(code, data)
	}

	return w.writeResponse(data)
}

// Middleware translates OpenAI-style chat completion requests into native
// Ollama chat requests on the way in, and rewrites the responses on the
// way out via the writer above.
func Middleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		var req ChatCompletionRequest
		err := c.ShouldBindJSON(&req)
		if err != nil {
			c.AbortWithStatusJSON(http.StatusBadRequest, NewError(http.StatusBadRequest, err.Error()))
			return
		}

		if len(req.Messages) == 0 {
			c.AbortWithStatusJSON(http.StatusBadRequest, NewError(http.StatusBadRequest, "[] is too short - 'messages'"))
			return
		}

		var b bytes.Buffer
		if err := json.NewEncoder(&b).Encode(fromRequest(req)); err != nil {
			c.AbortWithStatusJSON(http.StatusInternalServerError, NewError(http.StatusInternalServerError, err.Error()))
			return
		}

		c.Request.Body = io.NopCloser(&b)

		w := &writer{
			ResponseWriter: c.Writer,
			stream:         req.Stream,
			id:             fmt.Sprintf("chatcmpl-%d", rand.Intn(999)),
		}

		c.Writer = w

		c.Next()
	}
}

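// A minimal mounting sketch (the route path and chatHandler are
// illustrative, not provided by this package). The middleware rewrites
// c.Request.Body and swaps c.Writer, so it must be installed on the same
// route, ahead of the native chat handler:
//
//	r := gin.Default()
//	r.POST("/v1/chat/completions", openai.Middleware(), chatHandler)
//	_ = r.Run(":11434")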