@@ -107,9 +107,14 @@ func toChatCompletion(id string, r api.ChatResponse) ChatCompletion {
 		Model:             r.Model,
 		SystemFingerprint: "fp_ollama",
 		Choices: []Choice{{
-			Index:        0,
-			Message:      Message{Role: r.Message.Role, Content: r.Message.Content},
-			FinishReason: &r.DoneReason,
+			Index:   0,
+			Message: Message{Role: r.Message.Role, Content: r.Message.Content},
+			FinishReason: func(reason string) *string {
+				if len(reason) > 0 {
+					return &reason
+				}
+				return nil
+			}(r.DoneReason),
 		}},
 		Usage: Usage{
 			// TODO: ollama returns 0 for prompt eval if the prompt was cached, but openai returns the actual count
@@ -127,13 +132,16 @@ func toChunk(id string, r api.ChatResponse) ChatCompletionChunk {
 		Created:           time.Now().Unix(),
 		Model:             r.Model,
 		SystemFingerprint: "fp_ollama",
-		Choices: []ChunkChoice{
-			{
-				Index:        0,
-				Delta:        Message{Role: "assistant", Content: r.Message.Content},
-				FinishReason: &r.DoneReason,
-			},
-		},
+		Choices: []ChunkChoice{{
+			Index: 0,
+			Delta: Message{Role: "assistant", Content: r.Message.Content},
+			FinishReason: func(reason string) *string {
+				if len(reason) > 0 {
+					return &reason
+				}
+				return nil
+			}(r.DoneReason),
+		}},
 	}
 }