jmorganca, 11 months ago
Parent
Current commit
ec17359a68
3 changed files with 194 additions and 140 deletions
  1. llama/ggml-metal-darwin_arm64.m (+17 −17)
  2. llama/llama.go (+11 −7)
  3. llama/runner/runner.go (+166 −116)

+ 17 - 17
llama/ggml-metal-darwin_arm64.m

@@ -1499,27 +1499,27 @@ static enum ggml_status ggml_metal_graph_compute(
                         // to the matrix-vector kernel
                         int ne11_mm_min = 1;
 
-#if 0
+
                         // the numbers below are measured on M2 Ultra for 7B and 13B models
                         // these numbers do not translate to other devices or model sizes
                         // TODO: need to find a better approach
-                        if ([ctx->device.name isEqualToString:@"Apple M2 Ultra"]) {
-                            switch (src0t) {
-                                case GGML_TYPE_F16:  ne11_mm_min = 2;  break;
-                                case GGML_TYPE_Q8_0: ne11_mm_min = 7;  break;
-                                case GGML_TYPE_Q2_K: ne11_mm_min = 15; break;
-                                case GGML_TYPE_Q3_K: ne11_mm_min = 7;  break;
-                                case GGML_TYPE_Q4_0:
-                                case GGML_TYPE_Q4_1: ne11_mm_min = 15; break;
-                                case GGML_TYPE_Q4_K: ne11_mm_min = 11; break;
-                                case GGML_TYPE_Q5_0:                          // not tested yet
-                                case GGML_TYPE_Q5_1: ne11_mm_min = 13; break; // not tested yet
-                                case GGML_TYPE_Q5_K: ne11_mm_min = 7;  break;
-                                case GGML_TYPE_Q6_K: ne11_mm_min = 7;  break;
-                                default:             ne11_mm_min = 1;  break;
-                            }
+                        // if ([ctx->device.name isEqualToString:@"Apple M2 Ultra"]) {
+                        switch (src0t) {
+                            case GGML_TYPE_F16:  ne11_mm_min = 2;  break;
+                            case GGML_TYPE_Q8_0: ne11_mm_min = 7;  break;
+                            case GGML_TYPE_Q2_K: ne11_mm_min = 15; break;
+                            case GGML_TYPE_Q3_K: ne11_mm_min = 7;  break;
+                            case GGML_TYPE_Q4_0:
+                            case GGML_TYPE_Q4_1: ne11_mm_min = 15; break;
+                            case GGML_TYPE_Q4_K: ne11_mm_min = 11; break;
+                            case GGML_TYPE_Q5_0:                          // not tested yet
+                            case GGML_TYPE_Q5_1: ne11_mm_min = 13; break; // not tested yet
+                            case GGML_TYPE_Q5_K: ne11_mm_min = 7;  break;
+                            case GGML_TYPE_Q6_K: ne11_mm_min = 7;  break;
+                            default:             ne11_mm_min = 1;  break;
                         }
-#endif
+                        // }
+
 
                         // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
                         // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel

+ 11 - 7
llama/llama.go

@@ -38,15 +38,14 @@ import (
 	"github.com/ollama/ollama/llm"
 	"github.com/ollama/ollama/llm"
 )
 )
 
 
-// SystemInfo is an unused example of calling llama.cpp functions using CGo
-func PrintSystemInfo() string {
-	return C.GoString(C.llama_print_system_info())
-}
-
 func BackendInit() {
 	C.llama_backend_init()
 }
 
+func PrintSystemInfo() string {
+	return C.GoString(C.llama_print_system_info())
+}
+
 type ContextParams struct {
 	c C.struct_llama_context_params
 }
@@ -100,7 +99,8 @@ func (c *Context) Model() *Model {
 	return &Model{c: C.llama_get_model(c.c)}
 }
 
-func (c *Context) SampleTokenGreedy(batch Batch) int {
+// TODO: break this up
+func (c *Context) SampleTokenGreedy(batch Batch, i int) int {
 	nv := c.Model().NumVocab()
 
 	// TODO(jmorganca): split this up into different functions
@@ -108,7 +108,7 @@ func (c *Context) SampleTokenGreedy(batch Batch) int {
 	defer C.free(unsafe.Pointer(candidates))
 
 	// get most recent logits
-	logits := C.llama_get_logits_ith(c.c, C.int(batch.NumTokens()-1))
+	logits := C.llama_get_logits_ith(c.c, C.int(i))
 	for i := 0; i < int(nv); i++ {
 		ptr := (*C.struct_llama_token_data)(unsafe.Pointer(uintptr(unsafe.Pointer(candidates)) + uintptr(i)*unsafe.Sizeof(C.struct_llama_token_data{})))
 		ptr.id = C.int(i)
@@ -123,6 +123,10 @@ func (c *Context) SampleTokenGreedy(batch Batch) int {
 	}))
 }
 
+func (c *Context) KvCacheSeqRm(seqId int, p0 int, p1 int) bool {
+	return bool(C.llama_kv_cache_seq_rm(c.c, C.int(seqId), C.int(p0), C.int(p1)))
+}
+
 func LoadModelFromFile(modelPath string, params ModelParams) *Model {
 	return &Model{c: C.llama_load_model_from_file(C.CString(modelPath), params.c)}
 }
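Note: the llama.go changes above alter the sampling call (SampleTokenGreedy now takes an explicit logits index) and add a KV-cache helper. Below is a minimal sketch of how the updated bindings might be called, assuming a loaded *llama.Context, *llama.Model and a populated Batch as used in runner.go further down; the helper name and return shape are illustrative, not part of the commit.

package llamasketch

import "github.com/ollama/ollama/llama"

// sampleNext is a hypothetical helper showing the new call shapes: the caller
// chooses which token's logits to sample from, and KvCacheSeqRm clears a
// finished sequence's KV cache (p1 = -1 means "through the end of the sequence").
func sampleNext(lc *llama.Context, model *llama.Model, batch llama.Batch, seqId int) (token int, done bool) {
	i := batch.NumTokens() - 1 // logits index of the most recently decoded token
	token = lc.SampleTokenGreedy(batch, i)
	if model.TokenIsEog(token) {
		lc.KvCacheSeqRm(seqId, 0, -1) // drop the finished sequence from the cache
		return token, true
	}
	return token, false
}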

+ 166 - 116
llama/runner/runner.go

@@ -1,7 +1,7 @@
 package main
 
 import (
-	"encoding/base64"
+	"context"
 	"encoding/json"
 	"encoding/json"
 	"flag"
 	"flag"
 	"fmt"
 	"fmt"
@@ -9,140 +9,205 @@ import (
 	"log/slog"
 	"log/slog"
 	"net"
 	"net"
 	"net/http"
 	"net/http"
-	"regexp"
-	"strconv"
 	"sync"
 	"sync"
 
 
 	"github.com/ollama/ollama/llama"
 	"github.com/ollama/ollama/llama"
 )
 )
 
 
-type Request struct {
-	Prompt string   `json:"prompt"`
-	Images []string `json:"images"`
+type Sequence struct {
+	// number of tokens evaluated
+	nPast int
+
+	// tokens left to evaluate
+	tokens []int
+
+	responses chan string
 }
 
-type Response struct {
-	Token string `json:"token"`
+// prompt returns true if the prompt is still being processed
+func (s *Sequence) prompt() bool {
+	return s.nPast < len(s.tokens)-1
+}
+
+func (s *Server) NewSequence(text string, w http.ResponseWriter) *Sequence {
+	tokens, err := s.lc.Model().Tokenize(text, 2048, true, true)
+	if err != nil {
+		panic(err)
+	}
+
+	return &Sequence{
+		tokens:    tokens,
+		responses: make(chan string, 1),
+	}
 }
 
 type Server struct {
 	model *llama.Model
 	lc    *llama.Context
 	cc    *llama.ClipContext
-}
-
-var mu sync.Mutex
-
-func (s *Server) stream(w http.ResponseWriter, r *http.Request) {
-	var request Request
-	if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
-		http.Error(w, "Bad request", http.StatusBadRequest)
-		return
-	}
 
-	mu.Lock()
-	defer mu.Unlock()
+	// parallel is the number of parallel requests to handle
+	parallel int
 
-	// Set the headers to indicate streaming
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Transfer-Encoding", "chunked")
-	w.WriteHeader(http.StatusOK)
+	// seqs is the list of parallel sequences being evaluated
+	seqs []*Sequence
 
-	enc := json.NewEncoder(w)
+	mu sync.Mutex
 
-	// create embeddings for each image
-	var embeddings []*llama.LlavaImageEmbed
-	if s.cc != nil {
-		for _, img := range request.Images {
-			data, err := base64.StdEncoding.DecodeString(img)
-			if err != nil {
-				http.Error(w, "Failed to decode image", http.StatusBadRequest)
-				return
-			}
+	cond *sync.Cond
+}
 
-			embd := llama.NewLlavaImageEmbed(s.cc, data)
-			embeddings = append(embeddings, embd)
+func (s *Server) allNil() bool {
+	for _, item := range s.seqs {
+		if item != nil {
+			return false
 		}
 	}
+	return true
+}
 
-	var nPast int
+func (s *Server) run(ctx context.Context) {
+	batch := llama.NewBatch(512, 0, s.parallel)
+	defer batch.Free()
 
-	// eval the prompt
-	re := regexp.MustCompile(`\[\s*img-(\d+)\s*\]`)
-	matches := re.FindAllStringSubmatchIndex(request.Prompt, -1)
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		default:
+			slog.Info("Processing batch", "seqs", len(s.seqs))
+			s.mu.Lock()
+			for s.allNil() {
+				fmt.Println("wait")
+				s.cond.Wait() // Wait until an item is added
+			}
+			s.mu.Unlock()
+
+			fmt.Println("seqs", s.seqs, len(s.seqs))
+
+			// prepare the batch
+			ibatch := make([]int, s.parallel)
+			for i, seq := range s.seqs {
+				if seq == nil {
+					continue
+				}
+
+				for j, t := range seq.tokens {
+					// todo: make this n_batch
+					if j > 512 {
+						break
+					}
+
+					batch.Add(t, seq.nPast, []int{i}, !seq.prompt())
+					seq.nPast++
+
+					if seq.prompt() {
+						ibatch[i] = batch.NumTokens() + 1
+					}
+				}
+			}
 
-	// eval each chunk including images
-	pos := 0
-	for _, match := range matches {
-		part := request.Prompt[pos:match[0]]
-		fmt.Println("Text part:", part)
+			err := s.lc.Decode(batch)
+			if err != nil {
+				panic("Failed to decode")
+			}
 
-		// eval text before image
-		err := s.evalText(part, &nPast)
-		if err != nil {
-			log.Println("Failed to eval text:", err)
-			return
-		}
+			for i, seq := range s.seqs {
+				if seq == nil {
+					continue
+				}
+
+				// don't sample prompt processing
+				if seq.prompt() {
+					if len(seq.tokens) < 512 {
+						seq.tokens = []int{}
+					} else {
+						seq.tokens = seq.tokens[512:]
+					}
+
+					continue
+				}
+
+				// sample a token
+				// TODO: sample based on the sequence
+				fmt.Println("Sampling token", i, ibatch[i])
+				token := s.lc.SampleTokenGreedy(batch, ibatch[i])
+
+				// if it's an end of sequence token, break
+				// TODO: just end this sequence
+				if s.model.TokenIsEog(token) {
+					// TODO: end the sequence instead of quitting the pool
+					s.lc.KvCacheSeqRm(i, 0, -1)
+					close(seq.responses)
+					s.seqs[i] = nil
+					continue
+				}
+
+				seq.responses <- s.model.TokenToPiece(token)
+				seq.tokens = []int{token}
+			}
 
-		// eval image
-		imgIndexStr := request.Prompt[match[2]:match[3]]
-		imgIndex, err := strconv.Atoi(imgIndexStr)
-		if err != nil {
-			slog.Warn("Failed to parse image index", "index", imgIndexStr)
-			continue
+			batch.Clear()
 		}
+	}
+}
 
-		fmt.Println("Tag index:", imgIndex)
-		if imgIndex <= len(embeddings) {
-			slog.Info("evaluating image", "index", imgIndex)
-			llama.LlavaEvalImageEmbed(s.lc, embeddings[imgIndex], 512, &nPast)
-		}
+type Request struct {
+	Prompt string   `json:"prompt"`
+	Images []string `json:"images"`
+}
 
-		pos = match[1]
-	}
+type Response struct {
+	Token string `json:"token"`
+}
 
-	// eval remaining text
-	if pos < len(request.Prompt) {
-		s.evalText(request.Prompt[pos:], &nPast)
+func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
+	var request Request
+	if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
+		http.Error(w, "Bad request", http.StatusBadRequest)
+		return
 	}
 
-	batch := llama.NewBatch(512, 0, 1)
-	defer batch.Free()
+	// Set the headers to indicate streaming
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Transfer-Encoding", "chunked")
+	w.WriteHeader(http.StatusOK)
 
-	// main loop
-	for n := nPast; n < 2048; n++ {
-		// sample a token
-		token := s.lc.SampleTokenGreedy(batch)
+	seq := s.NewSequence(request.Prompt, w)
 
-		// if it's an end of sequence token, break
-		if s.model.TokenIsEog(token) {
+	s.mu.Lock()
+	for i, sq := range s.seqs {
+		if sq == nil {
+			s.seqs[i] = seq
+			fmt.Println("signal")
+			s.cond.Signal()
 			break
 		}
+	}
+	s.mu.Unlock()
 
-		// print the token
-		str := s.model.TokenToPiece(token)
-
-		if err := enc.Encode(&Response{Token: str}); err != nil {
+	for token := range seq.responses {
+		if err := json.NewEncoder(w).Encode(&Response{
+			Token: token,
+		}); err != nil {
 			log.Println("Failed to encode result:", err)
 			return
 		}
-		w.(http.Flusher).Flush()
-
-		batch.Clear()
-		batch.Add(token, n, []int{0}, true)
 
-		err := s.lc.Decode(batch)
-		if err != nil {
-			panic("Failed to decode")
+		flusher, ok := w.(http.Flusher)
+		if !ok {
+			http.Error(w, "Streaming not supported", http.StatusInternalServerError)
+			return
 		}
-	}
 
-	s.lc.KvCacheClear()
+		flusher.Flush()
+	}
 }
 
 func main() {
 	mpath := flag.String("model", "", "Path to model binary file")
 	ppath := flag.String("projector", "", "Path to projector binary file")
+	parallel := flag.Int("parallel", 1, "Number of sequences to handle simultaneously")
 	flag.Parse()
 
 	// load the model
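Aside: the new run/handler pair in the hunk above coordinates work with a standard sync.Cond producer/consumer pattern: handler places a Sequence into a free slot and signals, while run waits under the lock for as long as every slot is nil. The stripped-down sketch below restates just that pattern with hypothetical names; it is not code from the commit.

package condsketch

import (
	"fmt"
	"sync"
)

// pool mirrors the Server fields used for scheduling: a mutex, a condition
// variable built on it, and a fixed set of slots where nil means "free".
type pool struct {
	mu    sync.Mutex
	cond  *sync.Cond
	slots []*string
}

func newPool(n int) *pool {
	p := &pool{slots: make([]*string, n)}
	p.cond = sync.NewCond(&p.mu)
	return p
}

// submit is the handler side: claim a free slot and wake the run loop.
func (p *pool) submit(job string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for i, s := range p.slots {
		if s == nil {
			p.slots[i] = &job
			p.cond.Signal()
			return
		}
	}
}

// run is the consumer side: sleep while every slot is empty, then process.
func (p *pool) run() {
	for {
		p.mu.Lock()
		for p.allFree() {
			p.cond.Wait() // releases the lock while sleeping, reacquires on wake
		}
		for i, s := range p.slots {
			if s != nil {
				fmt.Println("processing", *s)
				p.slots[i] = nil
			}
		}
		p.mu.Unlock()
	}
}

func (p *pool) allFree() bool {
	for _, s := range p.slots {
		if s != nil {
			return false
		}
	}
	return true
}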
@@ -156,7 +221,7 @@ func main() {
 	}
 
 	var cc *llama.ClipContext
-	if ppath != nil {
+	if *ppath != "" {
 		cc = llama.NewClipContext(*ppath)
 		if cc == nil {
 			panic("Failed to create clip context")
@@ -164,11 +229,18 @@ func main() {
 	}
 
 	server := &Server{
-		model: model,
-		lc:    lc,
-		cc:    cc,
+		model:    model,
+		lc:       lc,
+		cc:       cc,
+		parallel: *parallel,
+		seqs:     make([]*Sequence, *parallel),
 	}
 
+	server.cond = sync.NewCond(&server.mu)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	go server.run(ctx)
+
 	addr := "127.0.0.1:8080"
 	listener, err := net.Listen("tcp", addr)
 	if err != nil {
@@ -178,35 +250,13 @@ func main() {
 	defer listener.Close()
 
 	httpServer := http.Server{
-		Handler: http.HandlerFunc(server.stream),
+		Handler: http.HandlerFunc(server.handler),
 	}
 
 	log.Println("Server listening on", addr)
 	if err := httpServer.Serve(listener); err != nil {
 		log.Fatal("server error:", err)
 	}
-}
-
-func (s *Server) evalText(text string, nPast *int) error {
-	// eval before
-	batch := llama.NewBatch(512, 0, 1)
-	defer batch.Free()
-
-	tokens, err := s.lc.Model().Tokenize(text, 2048, true, true)
-	if err != nil {
-		return fmt.Errorf("tokenize failed: %w", err)
-	}
-
-	// prompt eval
-	for _, t := range tokens {
-		batch.Add(t, *nPast, []int{0}, true)
-		*nPast++
-	}
-
-	err = s.lc.Decode(batch)
-	if err != nil {
-		return fmt.Errorf("decode failed: %w", err)
-	}
 
-	return nil
+	cancel()
 }
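For reference, a minimal client sketch for the reworked streaming handler, assuming a runner built from this commit is listening on 127.0.0.1:8080 (for example started with -model <path> -parallel 2). The server registers a single handler for every path, accepts the Request JSON shown above, and streams one Response JSON object per generated token until the sequence finishes and the connection closes; the prompt text and URL path below are placeholders.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Matches the runner's Request shape; the Images field is omitted here.
	body, err := json.Marshal(map[string]string{"prompt": "Why is the sky blue?"})
	if err != nil {
		panic(err)
	}

	resp, err := http.Post("http://127.0.0.1:8080/", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The handler writes one JSON object per token, e.g. {"token":" The"},
	// so a streaming decoder can read them back-to-back until EOF.
	dec := json.NewDecoder(resp.Body)
	for {
		var r struct {
			Token string `json:"token"`
		}
		if err := dec.Decode(&r); err != nil {
			break // io.EOF once the sequence ends and the server closes the stream
		}
		fmt.Print(r.Token)
	}
	fmt.Println()
}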