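// This program is a minimal llama.cpp-based runner: an HTTP server that
// tokenizes an incoming prompt and streams generated tokens back to the
// client, multiplexing up to -parallel sequences over a single model context.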
package main

import (
	"context"
	"encoding/json"
	"flag"
	"fmt"
	"log"
	"log/slog"
	"net"
	"net/http"
	"sync"

	"github.com/ollama/ollama/llama"
)
type Sequence struct {
	// number of tokens evaluated so far
	nPast int

	// tokens left to evaluate
	tokens []int

	// generated pieces are streamed to the HTTP handler over this channel
	responses chan string
}

// prompt returns true while the prompt is still being processed.
func (s *Sequence) prompt() bool {
	return s.nPast < len(s.tokens)-1
}
func (s *Server) NewSequence(text string) *Sequence {
	tokens, err := s.lc.Model().Tokenize(text, 2048, true, true)
	if err != nil {
		panic(err)
	}

	return &Sequence{
		tokens:    tokens,
		responses: make(chan string, 1),
	}
}
type Server struct {
	model *llama.Model
	lc    *llama.Context
	cc    *llama.ClipContext

	// parallel is the number of parallel requests to handle
	parallel int

	// seqs is the list of parallel sequences being evaluated
	seqs []*Sequence

	mu   sync.Mutex
	cond *sync.Cond
}

func (s *Server) allNil() bool {
	for _, item := range s.seqs {
		if item != nil {
			return false
		}
	}

	return true
}
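// run is the scheduling loop: it waits until at least one sequence has been
// queued, packs pending tokens from every active sequence into a single
// batch, decodes it, and samples one new token for each sequence that has
// finished processing its prompt.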
func (s *Server) run(ctx context.Context) {
	batch := llama.NewBatch(512, 0, s.parallel)
	defer batch.Free()

	for {
		select {
		case <-ctx.Done():
			return
		default:
			slog.Info("Processing batch", "seqs", len(s.seqs))

			s.mu.Lock()
			for s.allNil() {
				slog.Debug("waiting for work")
				s.cond.Wait() // wait until an item is added
			}
			s.mu.Unlock()

			slog.Debug("processing sequences", "seqs", len(s.seqs))
			// prepare the batch
			ibatch := make([]int, s.parallel)
			for i, seq := range s.seqs {
				if seq == nil {
					continue
				}

				for j, t := range seq.tokens {
					// todo: make this n_batch
					if j >= 512 {
						break
					}

					// logits are only needed once the prompt has been consumed
					// (the last prompt token and each generated token)
					batch.Add(t, seq.nPast, []int{i}, !seq.prompt())
					seq.nPast++

					// remember the batch index of the last token added for this
					// sequence; its logits are the ones sampled from below
					ibatch[i] = batch.NumTokens() - 1
				}
			}

			if err := s.lc.Decode(batch); err != nil {
				panic(fmt.Sprintf("failed to decode batch: %v", err))
			}
			for i, seq := range s.seqs {
				if seq == nil {
					continue
				}

				// don't sample while the prompt is still being processed; just
				// drop the tokens that were evaluated in this batch
				if seq.prompt() {
					if len(seq.tokens) < 512 {
						seq.tokens = []int{}
					} else {
						seq.tokens = seq.tokens[512:]
					}

					continue
				}

				// sample a token
				// TODO: sample based on the sequence
				slog.Debug("sampling token", "seq", i, "ibatch", ibatch[i])
				token := s.lc.SampleTokenGreedy(batch, ibatch[i])

				// if it's an end of sequence token, end the sequence and free
				// its slot for the next request
				if s.model.TokenIsEog(token) {
					s.lc.KvCacheSeqRm(i, 0, -1)
					close(seq.responses)
					s.seqs[i] = nil
					continue
				}

				seq.responses <- s.model.TokenToPiece(token)
				seq.tokens = []int{token}
			}

			batch.Clear()
		}
	}
}
type Request struct {
	Prompt string   `json:"prompt"`
	Images []string `json:"images"`
}

type Response struct {
	Token string `json:"token"`
}
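// Illustrative exchange (prompt and tokens are made-up values): the client
// sends a single JSON request and receives one JSON object per generated
// token, each on its own line:
//
//	-> {"prompt": "Why is the sky blue?"}
//	<- {"token":"Because"}
//	<- {"token":" of"}
//	<- ...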
func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
	var request Request
	if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
		http.Error(w, "Bad request", http.StatusBadRequest)
		return
	}

	// streaming requires the ResponseWriter to support flushing; check before
	// any headers are written
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "Streaming not supported", http.StatusInternalServerError)
		return
	}

	// set the headers to indicate streaming
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Transfer-Encoding", "chunked")
	w.WriteHeader(http.StatusOK)

	seq := s.NewSequence(request.Prompt)

	// place the sequence in the first free slot and wake the run loop
	// TODO: if every slot is busy the sequence is never scheduled and this
	// request blocks forever
	s.mu.Lock()
	for i, sq := range s.seqs {
		if sq == nil {
			s.seqs[i] = seq
			slog.Debug("queued sequence", "slot", i)
			s.cond.Signal()
			break
		}
	}
	s.mu.Unlock()

	// stream tokens back to the client as they are produced
	for token := range seq.responses {
		if err := json.NewEncoder(w).Encode(&Response{
			Token: token,
		}); err != nil {
			log.Println("Failed to encode result:", err)
			return
		}

		flusher.Flush()
	}
}
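// Example invocation (binary name, model path, and prompt below are
// illustrative, not part of this file):
//
//	go run . -model /path/to/model.gguf -parallel 2
//	curl -s http://127.0.0.1:8080 -d '{"prompt": "Why is the sky blue?"}'
//
// Each generated token is returned as a separate JSON object on the response
// stream.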
func main() {
	mpath := flag.String("model", "", "Path to model binary file")
	ppath := flag.String("projector", "", "Path to projector binary file")
	parallel := flag.Int("parallel", 1, "Number of sequences to handle simultaneously")
	flag.Parse()

	// load the model
	llama.BackendInit()
	params := llama.NewModelParams()
	model := llama.LoadModelFromFile(*mpath, params)

	ctxParams := llama.NewContextParams()
	lc := llama.NewContextWithModel(model, ctxParams)
	if lc == nil {
		panic("Failed to create context")
	}

	// optionally load a clip projector for multimodal models
	var cc *llama.ClipContext
	if *ppath != "" {
		cc = llama.NewClipContext(*ppath)
		if cc == nil {
			panic("Failed to create clip context")
		}
	}

	server := &Server{
		model:    model,
		lc:       lc,
		cc:       cc,
		parallel: *parallel,
		seqs:     make([]*Sequence, *parallel),
	}
	server.cond = sync.NewCond(&server.mu)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go server.run(ctx)

	addr := "127.0.0.1:8080"
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		fmt.Println("Listen error:", err)
		return
	}
	defer listener.Close()

	httpServer := http.Server{
		Handler: http.HandlerFunc(server.handler),
	}

	log.Println("Server listening on", addr)
	if err := httpServer.Serve(listener); err != nil {
		log.Fatal("server error:", err)
	}
}