openai.go

// Package openai provides middleware for partial compatibility with the OpenAI REST API.
package openai

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"math/rand"
	"net/http"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/ollama/ollama/api"
)
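
// The types below mirror the subset of the OpenAI chat completions wire
// format that this middleware translates; unrecognized fields are ignored
// when the request is bound.
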
type Error struct {
	Message string      `json:"message"`
	Type    string      `json:"type"`
	Param   interface{} `json:"param"`
	Code    *string     `json:"code"`
}

type ErrorResponse struct {
	Error Error `json:"error"`
}

type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

type Choice struct {
	Index        int     `json:"index"`
	Message      Message `json:"message"`
	FinishReason *string `json:"finish_reason"`
}

type ChunkChoice struct {
	Index        int     `json:"index"`
	Delta        Message `json:"delta"`
	FinishReason *string `json:"finish_reason"`
}

type Usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

type ResponseFormat struct {
	Type string `json:"type"`
}

type ChatCompletionRequest struct {
	Model            string          `json:"model"`
	Messages         []Message       `json:"messages"`
	Stream           bool            `json:"stream"`
	MaxTokens        *int            `json:"max_tokens"`
	Seed             *int            `json:"seed"`
	Stop             any             `json:"stop"`
	Temperature      *float64        `json:"temperature"`
	FrequencyPenalty *float64        `json:"frequency_penalty"`
	PresencePenalty  *float64        `json:"presence_penalty"`
	TopP             *float64        `json:"top_p"`
	ResponseFormat   *ResponseFormat `json:"response_format"`
}

type ChatCompletion struct {
	Id                string   `json:"id"`
	Object            string   `json:"object"`
	Created           int64    `json:"created"`
	Model             string   `json:"model"`
	SystemFingerprint string   `json:"system_fingerprint"`
	Choices           []Choice `json:"choices"`
	Usage             Usage    `json:"usage,omitempty"`
}

type ChatCompletionChunk struct {
	Id                string        `json:"id"`
	Object            string        `json:"object"`
	Created           int64         `json:"created"`
	Model             string        `json:"model"`
	SystemFingerprint string        `json:"system_fingerprint"`
	Choices           []ChunkChoice `json:"choices"`
}
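
// NewError wraps a message in an OpenAI-style error envelope, deriving the
// error type from the HTTP status code.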
func NewError(code int, message string) ErrorResponse {
	var etype string
	switch code {
	case http.StatusBadRequest:
		etype = "invalid_request_error"
	case http.StatusNotFound:
		etype = "not_found_error"
	default:
		etype = "api_error"
	}

	return ErrorResponse{Error{Type: etype, Message: message}}
}
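
// toChatCompletion converts a completed api.ChatResponse into a
// non-streaming OpenAI chat.completion object.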
func toChatCompletion(id string, r api.ChatResponse) ChatCompletion {
	return ChatCompletion{
		Id:                id,
		Object:            "chat.completion",
		Created:           r.CreatedAt.Unix(),
		Model:             r.Model,
		SystemFingerprint: "fp_ollama",
		Choices: []Choice{{
			Index:        0,
			Message:      Message{Role: r.Message.Role, Content: r.Message.Content},
			FinishReason: &r.DoneReason,
		}},
		Usage: Usage{
			// TODO: ollama returns 0 for prompt eval if the prompt was cached, but openai returns the actual count
			PromptTokens:     r.PromptEvalCount,
			CompletionTokens: r.EvalCount,
			TotalTokens:      r.PromptEvalCount + r.EvalCount,
		},
	}
}
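
// toChunk converts an incremental api.ChatResponse into an OpenAI
// chat.completion.chunk for SSE streaming.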
func toChunk(id string, r api.ChatResponse) ChatCompletionChunk {
	return ChatCompletionChunk{
		Id:                id,
		Object:            "chat.completion.chunk",
		Created:           time.Now().Unix(),
		Model:             r.Model,
		SystemFingerprint: "fp_ollama",
		Choices: []ChunkChoice{{
			Index:        0,
			Delta:        Message{Role: "assistant", Content: r.Message.Content},
			FinishReason: &r.DoneReason,
		}},
	}
}
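
// fromRequest translates an OpenAI chat completion request into an
// api.ChatRequest, mapping OpenAI parameter names onto Ollama option
// keys. Temperature, frequency_penalty, and presence_penalty are doubled
// on the way through, and setting seed forces temperature to 0 for
// reproducible output.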
func fromRequest(r ChatCompletionRequest) api.ChatRequest {
	var messages []api.Message
	for _, msg := range r.Messages {
		messages = append(messages, api.Message{Role: msg.Role, Content: msg.Content})
	}

	options := make(map[string]interface{})

	switch stop := r.Stop.(type) {
	case string:
		options["stop"] = []string{stop}
	case []interface{}:
		var stops []string
		for _, s := range stop {
			if str, ok := s.(string); ok {
				stops = append(stops, str)
			}
		}
		options["stop"] = stops
	}

	if r.MaxTokens != nil {
		options["num_predict"] = *r.MaxTokens
	}

	if r.Temperature != nil {
		options["temperature"] = *r.Temperature * 2.0
	} else {
		options["temperature"] = 1.0
	}

	if r.Seed != nil {
		options["seed"] = *r.Seed
		// temperature=0 is required for reproducible outputs
		options["temperature"] = 0.0
	}

	if r.FrequencyPenalty != nil {
		options["frequency_penalty"] = *r.FrequencyPenalty * 2.0
	}

	if r.PresencePenalty != nil {
		options["presence_penalty"] = *r.PresencePenalty * 2.0
	}

	if r.TopP != nil {
		options["top_p"] = *r.TopP
	} else {
		options["top_p"] = 1.0
	}

	var format string
	if r.ResponseFormat != nil && r.ResponseFormat.Type == "json_object" {
		format = "json"
	}

	return api.ChatRequest{
		Model:    r.Model,
		Messages: messages,
		Format:   format,
		Options:  options,
		Stream:   &r.Stream,
	}
}
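
// writer wraps gin's ResponseWriter so that whatever the downstream
// handler writes is translated into OpenAI-compatible output before it
// reaches the client.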
type writer struct {
	stream bool
	id     string
	gin.ResponseWriter
}
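
// writeError re-encodes an api.StatusError payload from the downstream
// handler as an OpenAI-style error body.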
func (w *writer) writeError(code int, data []byte) (int, error) {
	var serr api.StatusError
	err := json.Unmarshal(data, &serr)
	if err != nil {
		return 0, err
	}

	w.ResponseWriter.Header().Set("Content-Type", "application/json")
	err = json.NewEncoder(w.ResponseWriter).Encode(NewError(http.StatusInternalServerError, serr.Error()))
	if err != nil {
		return 0, err
	}

	return len(data), nil
}
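
// writeResponse decodes an api.ChatResponse and writes it back either as
// a single chat.completion object or, when streaming, as an SSE "data:"
// chunk, appending "data: [DONE]" once the final chunk arrives.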
func (w *writer) writeResponse(data []byte) (int, error) {
	var chatResponse api.ChatResponse
	err := json.Unmarshal(data, &chatResponse)
	if err != nil {
		return 0, err
	}

	// chat chunk
	if w.stream {
		d, err := json.Marshal(toChunk(w.id, chatResponse))
		if err != nil {
			return 0, err
		}

		w.ResponseWriter.Header().Set("Content-Type", "text/event-stream")
		_, err = w.ResponseWriter.Write([]byte(fmt.Sprintf("data: %s\n\n", d)))
		if err != nil {
			return 0, err
		}

		if chatResponse.Done {
			_, err = w.ResponseWriter.Write([]byte("data: [DONE]\n\n"))
			if err != nil {
				return 0, err
			}
		}

		return len(data), nil
	}

	// chat completion
	w.ResponseWriter.Header().Set("Content-Type", "application/json")
	err = json.NewEncoder(w.ResponseWriter).Encode(toChatCompletion(w.id, chatResponse))
	if err != nil {
		return 0, err
	}

	return len(data), nil
}
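
// Write intercepts the response body written by the downstream handler,
// routing error statuses through writeError and successful responses
// through writeResponse.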
func (w *writer) Write(data []byte) (int, error) {
	code := w.ResponseWriter.Status()
	if code != http.StatusOK {
		return w.writeError(code, data)
	}

	return w.writeResponse(data)
}
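
// Middleware returns a gin handler that accepts an OpenAI-style chat
// completion request, rewrites the request body as an api.ChatRequest for
// the downstream handler, and wraps the response writer so the reply is
// translated back into OpenAI format.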
func Middleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		var req ChatCompletionRequest
		err := c.ShouldBindJSON(&req)
		if err != nil {
			c.AbortWithStatusJSON(http.StatusBadRequest, NewError(http.StatusBadRequest, err.Error()))
			return
		}

		if len(req.Messages) == 0 {
			c.AbortWithStatusJSON(http.StatusBadRequest, NewError(http.StatusBadRequest, "[] is too short - 'messages'"))
			return
		}

		var b bytes.Buffer
		if err := json.NewEncoder(&b).Encode(fromRequest(req)); err != nil {
			c.AbortWithStatusJSON(http.StatusInternalServerError, NewError(http.StatusInternalServerError, err.Error()))
			return
		}

		c.Request.Body = io.NopCloser(&b)

		w := &writer{
			ResponseWriter: c.Writer,
			stream:         req.Stream,
			id:             fmt.Sprintf("chatcmpl-%d", rand.Intn(999)),
		}

		c.Writer = w

		c.Next()
	}
}
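
// Example wiring (a sketch, not part of this file): the middleware would
// typically sit in front of the native chat handler. chatHandler here is
// a hypothetical placeholder for that handler.
//
//	r := gin.Default()
//	r.POST("/v1/chat/completions", Middleware(), chatHandler)
//	r.Run(":11434")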