@@ -1,7 +1,7 @@
 package main

 import (
-	"encoding/base64"
+	"context"
 	"encoding/json"
 	"flag"
 	"fmt"
@@ -9,140 +9,205 @@ import (
 	"log/slog"
 	"net"
 	"net/http"
-	"regexp"
-	"strconv"
 	"sync"

 	"github.com/ollama/ollama/llama"
 )

-type Request struct {
-	Prompt string   `json:"prompt"`
-	Images []string `json:"images"`
+type Sequence struct {
+	// number of tokens evaluated
+	nPast int
+
+	// tokens left to evaluate
+	tokens []int
+
+	responses chan string
 }

-type Response struct {
-	Token string `json:"token"`
+// prompt returns true if the prompt is still being processed
+func (s *Sequence) prompt() bool {
+	return s.nPast < len(s.tokens)-1
+}
+
+func (s *Server) NewSequence(text string, w http.ResponseWriter) *Sequence {
+	tokens, err := s.lc.Model().Tokenize(text, 2048, true, true)
+	if err != nil {
+		panic(err)
+	}
+
+	return &Sequence{
+		tokens:    tokens,
+		responses: make(chan string, 1),
+	}
 }

 type Server struct {
 	model *llama.Model
 	lc    *llama.Context
 	cc    *llama.ClipContext
-}
-
-var mu sync.Mutex
-
-func (s *Server) stream(w http.ResponseWriter, r *http.Request) {
-	var request Request
-	if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
-		http.Error(w, "Bad request", http.StatusBadRequest)
-		return
-	}

-	mu.Lock()
-	defer mu.Unlock()
+	// parallel is the number of parallel requests to handle
+	parallel int

-	// Set the headers to indicate streaming
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Transfer-Encoding", "chunked")
-	w.WriteHeader(http.StatusOK)
+	// seqs is the list of parallel sequences being evaluated
+	seqs []*Sequence

-	enc := json.NewEncoder(w)
+	mu sync.Mutex

-	// create embeddings for each image
-	var embeddings []*llama.LlavaImageEmbed
-	if s.cc != nil {
-		for _, img := range request.Images {
-			data, err := base64.StdEncoding.DecodeString(img)
-			if err != nil {
-				http.Error(w, "Failed to decode image", http.StatusBadRequest)
-				return
-			}
+	cond *sync.Cond
+}

-			embd := llama.NewLlavaImageEmbed(s.cc, data)
-			embeddings = append(embeddings, embd)
+func (s *Server) allNil() bool {
+	for _, item := range s.seqs {
+		if item != nil {
+			return false
 		}
 	}
+	return true
+}

-	var nPast int
+func (s *Server) run(ctx context.Context) {
+	batch := llama.NewBatch(512, 0, s.parallel)
+	defer batch.Free()

-	// eval the prompt
-	re := regexp.MustCompile(`\[\s*img-(\d+)\s*\]`)
-	matches := re.FindAllStringSubmatchIndex(request.Prompt, -1)
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		default:
+			slog.Info("Processing batch", "seqs", len(s.seqs))
+			s.mu.Lock()
+			for s.allNil() {
+				fmt.Println("wait")
+				s.cond.Wait() // Wait until an item is added
+			}
+			s.mu.Unlock()
+
+			fmt.Println("seqs", s.seqs, len(s.seqs))
+
+			// prepare the batch
+			ibatch := make([]int, s.parallel)
+			for i, seq := range s.seqs {
+				if seq == nil {
+					continue
+				}
+
+				for j, t := range seq.tokens {
+					// todo: make this n_batch
+					if j > 512 {
+						break
+					}
+
+					batch.Add(t, seq.nPast, []int{i}, !seq.prompt())
+					seq.nPast++
+
+					if seq.prompt() {
+						ibatch[i] = batch.NumTokens() + 1
+					}
+				}
+			}

-	// eval each chunk including images
-	pos := 0
-	for _, match := range matches {
-		part := request.Prompt[pos:match[0]]
-		fmt.Println("Text part:", part)
+			err := s.lc.Decode(batch)
+			if err != nil {
+				panic("Failed to decode")
+			}

-		// eval text before image
-		err := s.evalText(part, &nPast)
-		if err != nil {
-			log.Println("Failed to eval text:", err)
-			return
-		}
+			for i, seq := range s.seqs {
+				if seq == nil {
+					continue
+				}
+
+				// don't sample prompt processing
+				if seq.prompt() {
+					if len(seq.tokens) < 512 {
+						seq.tokens = []int{}
+					} else {
+						seq.tokens = seq.tokens[512:]
+					}
+
+					continue
+				}
+
+				// sample a token
+				// TODO: sample based on the sequence
+				fmt.Println("Sampling token", i, ibatch[i])
+				token := s.lc.SampleTokenGreedy(batch, ibatch[i])
+
+				// if it's an end of sequence token, break
+				// TODO: just end this sequence
+				if s.model.TokenIsEog(token) {
+					// TODO: end the sequence instead of quitting the pool
+					s.lc.KvCacheSeqRm(i, 0, -1)
+					close(seq.responses)
+					s.seqs[i] = nil
+					continue
+				}
+
+				seq.responses <- s.model.TokenToPiece(token)
+				seq.tokens = []int{token}
+			}

-		// eval image
-		imgIndexStr := request.Prompt[match[2]:match[3]]
-		imgIndex, err := strconv.Atoi(imgIndexStr)
-		if err != nil {
-			slog.Warn("Failed to parse image index", "index", imgIndexStr)
-			continue
+			batch.Clear()
 		}
+	}
+}

-		fmt.Println("Tag index:", imgIndex)
-		if imgIndex <= len(embeddings) {
-			slog.Info("evaluating image", "index", imgIndex)
-			llama.LlavaEvalImageEmbed(s.lc, embeddings[imgIndex], 512, &nPast)
-		}
+type Request struct {
+	Prompt string   `json:"prompt"`
+	Images []string `json:"images"`
+}

-		pos = match[1]
-	}
+type Response struct {
+	Token string `json:"token"`
+}

-	// eval remaining text
-	if pos < len(request.Prompt) {
-		s.evalText(request.Prompt[pos:], &nPast)
+func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
+	var request Request
+	if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
+		http.Error(w, "Bad request", http.StatusBadRequest)
+		return
 	}

-	batch := llama.NewBatch(512, 0, 1)
-	defer batch.Free()
+	// Set the headers to indicate streaming
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Transfer-Encoding", "chunked")
+	w.WriteHeader(http.StatusOK)

-	// main loop
-	for n := nPast; n < 2048; n++ {
-		// sample a token
-		token := s.lc.SampleTokenGreedy(batch)
+	seq := s.NewSequence(request.Prompt, w)

-		// if it's an end of sequence token, break
-		if s.model.TokenIsEog(token) {
+	s.mu.Lock()
+	for i, sq := range s.seqs {
+		if sq == nil {
+			s.seqs[i] = seq
+			fmt.Println("signal")
+			s.cond.Signal()
 			break
 		}
+	}
+	s.mu.Unlock()

-		// print the token
-		str := s.model.TokenToPiece(token)
-
-		if err := enc.Encode(&Response{Token: str}); err != nil {
+	for token := range seq.responses {
+		if err := json.NewEncoder(w).Encode(&Response{
+			Token: token,
+		}); err != nil {
 			log.Println("Failed to encode result:", err)
 			return
 		}
-		w.(http.Flusher).Flush()
-
-		batch.Clear()
-		batch.Add(token, n, []int{0}, true)

-		err := s.lc.Decode(batch)
-		if err != nil {
-			panic("Failed to decode")
+		flusher, ok := w.(http.Flusher)
+		if !ok {
+			http.Error(w, "Streaming not supported", http.StatusInternalServerError)
+			return
 		}
-	}

-	s.lc.KvCacheClear()
+		flusher.Flush()
+	}
 }

 func main() {
 	mpath := flag.String("model", "", "Path to model binary file")
 	ppath := flag.String("projector", "", "Path to projector binary file")
+	parallel := flag.Int("parallel", 1, "Number of sequences to handle simultaneously")
 	flag.Parse()

 	// load the model
@@ -156,7 +221,7 @@ func main() {
 	}

 	var cc *llama.ClipContext
-	if ppath != nil {
+	if *ppath != "" {
 		cc = llama.NewClipContext(*ppath)
 		if cc == nil {
 			panic("Failed to create clip context")
@@ -164,11 +229,18 @@ func main() {
 	}

 	server := &Server{
-		model: model,
-		lc:    lc,
-		cc:    cc,
+		model:    model,
+		lc:       lc,
+		cc:       cc,
+		parallel: *parallel,
+		seqs:     make([]*Sequence, *parallel),
 	}

+	server.cond = sync.NewCond(&server.mu)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	go server.run(ctx)
+
 	addr := "127.0.0.1:8080"
 	listener, err := net.Listen("tcp", addr)
 	if err != nil {
@@ -178,35 +250,13 @@ func main() {
 	defer listener.Close()

 	httpServer := http.Server{
-		Handler: http.HandlerFunc(server.stream),
+		Handler: http.HandlerFunc(server.handler),
 	}

 	log.Println("Server listening on", addr)
 	if err := httpServer.Serve(listener); err != nil {
 		log.Fatal("server error:", err)
 	}
-}
-
-func (s *Server) evalText(text string, nPast *int) error {
-	// eval before
-	batch := llama.NewBatch(512, 0, 1)
-	defer batch.Free()
-
-	tokens, err := s.lc.Model().Tokenize(text, 2048, true, true)
-	if err != nil {
-		return fmt.Errorf("tokenize failed: %w", err)
-	}
-
-	// prompt eval
-	for _, t := range tokens {
-		batch.Add(t, *nPast, []int{0}, true)
-		*nPast++
-	}
-
-	err = s.lc.Decode(batch)
-	if err != nil {
-		return fmt.Errorf("decode failed: %w", err)
-	}

-	return nil
+	cancel()
 }
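
Not part of the patch: a minimal client sketch for exercising the streaming handler above, assuming the runner from this diff is listening on 127.0.0.1:8080; the prompt text and request path are illustrative only.

// client.go - posts a prompt and prints the streamed tokens as they arrive.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// the handler decodes a Request{Prompt, Images}; Images is omitted here
	body, err := json.Marshal(map[string]string{"prompt": "why is the sky blue?"})
	if err != nil {
		panic(err)
	}

	resp, err := http.Post("http://127.0.0.1:8080/", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// the server writes one JSON Response object per sampled token,
	// so decode the chunked body as a stream of JSON values
	dec := json.NewDecoder(resp.Body)
	for {
		var r struct {
			Token string `json:"token"`
		}
		if err := dec.Decode(&r); err != nil {
			break
		}
		fmt.Print(r.Token)
	}
	fmt.Println()
}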