
embeddings

jmorganca, 11 months ago
parent
current commit
b2ef3bf490
2 files changed, 126 insertions(+), 35 deletions(-)
  1. llama/llama.go (+19, −0)
  2. llama/runner/runner.go (+107, −35)

llama/llama.go (+19, −0)

@@ -55,6 +55,7 @@ func NewContextParams() ContextParams {
 	params.n_ctx = C.uint(2048)
 	params.n_threads = C.uint(runtime.NumCPU())
 	params.n_threads_batch = params.n_threads
+	params.embeddings = C.bool(true)
 	return ContextParams{c: params}
 }
 
@@ -124,6 +125,20 @@ func (c *Context) KvCacheSeqRm(seqId int, p0 int, p1 int) bool {
 	return bool(C.llama_kv_cache_seq_rm(c.c, C.int(seqId), C.int(p0), C.int(p1)))
 }
 
+// Get the embeddings for a sequence id
+func (c *Context) GetEmbeddingsSeq(seqId int) []float32 {
+	embeddings := unsafe.Pointer(C.llama_get_embeddings_seq(c.c, C.int(seqId)))
+	if embeddings == nil {
+		return nil
+	}
+
+	return unsafe.Slice((*float32)(embeddings), c.Model().NEmbd())
+}
+
+func (c *Context) GetEmbeddingsIth(i int) []float32 {
+	return unsafe.Slice((*float32)(unsafe.Pointer(C.llama_get_embeddings_ith(c.c, C.int32_t(i)))), c.Model().NEmbd())
+}
+
 func LoadModelFromFile(modelPath string, params ModelParams) *Model {
 	return &Model{c: C.llama_load_model_from_file(C.CString(modelPath), params.c)}
 }
@@ -225,6 +240,10 @@ func (m *Model) Tokenize(text string, maxTokens int, addSpecial bool, parseSpeci
 	return tokens, nil
 }
 
+func (m *Model) NEmbd() int {
+	return int(C.llama_n_embd(m.c))
+}
+
 func Quantize(infile, outfile string, ftype uint32) error {
 	cinfile := C.CString(infile)
 	defer C.free(unsafe.Pointer(cinfile))
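
For orientation, a minimal sketch (not part of the commit) of how the new accessors compose on the caller's side. The helper name is hypothetical, the import of this repo's llama package is assumed, and lc is assumed to be a *llama.Context on which a batch for seqId has already been decoded:

// embeddingFor prefers the pooled per-sequence embedding and falls back to
// the embedding at a given batch index, mirroring the runner change below.
func embeddingFor(lc *llama.Context, seqId, batchIdx int) []float32 {
	if embd := lc.GetEmbeddingsSeq(seqId); embd != nil {
		return embd
	}
	return lc.GetEmbeddingsIth(batchIdx)
}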

llama/runner/runner.go (+107, −35)

@@ -23,9 +23,16 @@ type Sequence struct {
 	// tokens left to evaluate
 	tokens []int
 
+	// channel to send responses over
 	responses chan string
 
 	samplingCtx *llama.SamplingContext
+
+	// channel to send back the embedding if embedding only
+	embedding chan []float32
+
+	// true if an embedding is to be returned instead of text generation
+	embeddingOnly bool
 }
 
 // prompt returns true if the prompt is still being processed
@@ -33,38 +40,26 @@ func (s *Sequence) prompt() bool {
 	return s.nPast < len(s.tokens)-1
 }
 
-func (s *Server) NewSequence(r Request, w http.ResponseWriter) *Sequence {
-	var samplingParams llama.SamplingParams
-	samplingParams.TopK = r.TopK
-	samplingParams.TopP = r.TopP
-	samplingParams.TfsZ = r.TFSZ
-	samplingParams.TypicalP = r.TypicalP
-	samplingParams.Temp = r.Temperature
-	samplingParams.PenaltyRepeat = r.RepeatPenalty
-	samplingParams.PenaltyFreq = r.FrequencyPenalty
-	samplingParams.PenaltyPresent = r.PresencePenalty
-	samplingParams.Mirostat = r.Mirostat
-	samplingParams.MirostatTau = r.MirostatTau
-	samplingParams.MirostatEta = r.MirostatEta
-	samplingParams.PenalizeNl = r.PenalizeNewline
-	samplingParams.Seed = uint32(r.Seed)
-	samplingParams.Grammar = r.Grammar
-
-	tokens, err := s.lc.Model().Tokenize(r.Prompt, 2048, false, true)
+func (s *Server) NewSequence(prompt string, params *llama.SamplingParams, embedding bool) *Sequence {
+	tokens, err := s.lc.Model().Tokenize(prompt, 2048, false, true)
 	if err != nil {
 		panic(err)
 	}
 
-	sc := llama.NewSamplingContext(samplingParams)
-
-	for _, t := range tokens {
-		sc.Accept(s.lc, t, false)
+	var sc *llama.SamplingContext
+	if params != nil {
+		sc = llama.NewSamplingContext(*params)
+		for _, t := range tokens {
+			sc.Accept(s.lc, t, false)
+		}
 	}
 
 	return &Sequence{
-		tokens:      tokens,
-		responses:   make(chan string, 1),
-		samplingCtx: sc,
+		tokens:        tokens,
+		responses:     make(chan string, 1),
+		embedding:     make(chan []float32, 1),
+		samplingCtx:   sc,
+		embeddingOnly: embedding,
 	}
 }
 
@@ -152,6 +147,20 @@ func (s *Server) run(ctx context.Context) {
 					continue
 				}
 
+				// if done processing the prompt, generate an embedding and return
+				if seq.embeddingOnly {
+					embd := s.lc.GetEmbeddingsSeq(i)
+					if embd == nil {
+						embd = s.lc.GetEmbeddingsIth(ibatch[i])
+					}
+
+					seq.embedding <- embd
+					close(seq.embedding)
+					s.lc.KvCacheSeqRm(i, 0, -1)
+					s.seqs[i] = nil
+					continue
+				}
+
 				// sample a token
 				// logits := s.lc.GetLogitsIth(ibatch[i])
 				// token := s.lc.SampleTokenGreedy(logits)
@@ -178,7 +187,7 @@ func (s *Server) run(ctx context.Context) {
 	}
 }
 
-type Request struct {
+type CompletionRequest struct {
 	Prompt  string   `json:"prompt"`
 	Images  []string `json:"images"`
 	Grammar string   `json:"grammar"`
@@ -186,14 +195,14 @@ type Request struct {
 	api.Options
 }
 
-type Response struct {
+type CompletionResponse struct {
 	Token string `json:"token"`
 }
 
-func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
-	var request Request
-	request.Options = api.DefaultOptions()
-	if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
+func (s *Server) completion(w http.ResponseWriter, r *http.Request) {
+	var req CompletionRequest
+	req.Options = api.DefaultOptions()
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
 		http.Error(w, "Bad request", http.StatusBadRequest)
 		return
 	}
@@ -203,8 +212,26 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Transfer-Encoding", "chunked")
 	w.WriteHeader(http.StatusOK)
 
-	seq := s.NewSequence(request, w)
-
+	var samplingParams llama.SamplingParams
+	samplingParams.TopK = req.TopK
+	samplingParams.TopP = req.TopP
+	samplingParams.TfsZ = req.TFSZ
+	samplingParams.TypicalP = req.TypicalP
+	samplingParams.Temp = req.Temperature
+	samplingParams.PenaltyRepeat = req.RepeatPenalty
+	samplingParams.PenaltyFreq = req.FrequencyPenalty
+	samplingParams.PenaltyPresent = req.PresencePenalty
+	samplingParams.Mirostat = req.Mirostat
+	samplingParams.MirostatTau = req.MirostatTau
+	samplingParams.MirostatEta = req.MirostatEta
+	samplingParams.PenalizeNl = req.PenalizeNewline
+	samplingParams.Seed = uint32(req.Seed)
+	samplingParams.Grammar = req.Grammar
+
+	seq := s.NewSequence(req.Prompt, &samplingParams, false)
+
+	// TODO (jmorganca): add to sequence queue instead of
+	// failing if a slot isn't available
 	s.mu.Lock()
 	for i, sq := range s.seqs {
 		if sq == nil {
@@ -215,8 +242,9 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
 	}
 	s.mu.Unlock()
 
+	// stream the response
 	for token := range seq.responses {
-		if err := json.NewEncoder(w).Encode(&Response{
+		if err := json.NewEncoder(w).Encode(&CompletionResponse{
 			Token: token,
 		}); err != nil {
 			log.Println("Failed to encode result:", err)
@@ -233,6 +261,46 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
 	}
 }
 
+type EmbeddingRequest struct {
+	Prompt string `json:"prompt"`
+}
+
+type EmbeddingResponse struct {
+	Embedding []float32 `json:"embedding"`
+}
+
+// TODO (jmorganca): is it safe to do this concurrently with decoding?
+func (s *Server) embeddings(w http.ResponseWriter, r *http.Request) {
+	var req EmbeddingRequest
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		http.Error(w, "Bad request", http.StatusBadRequest)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+
+	seq := s.NewSequence(req.Prompt, nil, true)
+
+	s.mu.Lock()
+	for i, sq := range s.seqs {
+		if sq == nil {
+			s.seqs[i] = seq
+			s.cond.Signal()
+			break
+		}
+	}
+	s.mu.Unlock()
+
+	embedding := <-seq.embedding
+
+	if err := json.NewEncoder(w).Encode(&EmbeddingResponse{
+		Embedding: embedding,
+	}); err != nil {
+		log.Println("Failed to encode result:", err)
+		return
+	}
+}
+
 func main() {
 	mpath := flag.String("model", "", "Path to model binary file")
 	ppath := flag.String("projector", "", "Path to projector binary file")
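
To exercise the new /embeddings route, here is a hedged client sketch in Go. The listen address is a placeholder (the runner's real address comes from its flags), and only the request/response JSON shapes defined in this diff are relied on:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// 127.0.0.1:8080 is a placeholder; use whatever address the runner binds to.
	body, err := json.Marshal(map[string]string{"prompt": "why is the sky blue?"})
	if err != nil {
		log.Fatal(err)
	}

	resp, err := http.Post("http://127.0.0.1:8080/embeddings", "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Matches EmbeddingResponse from runner.go.
	var out struct {
		Embedding []float32 `json:"embedding"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Println("embedding length:", len(out.Embedding))
}
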
@@ -279,8 +347,12 @@ func main() {
 	}
 	defer listener.Close()
 
+	mux := http.NewServeMux()
+	mux.HandleFunc("/embeddings", server.embeddings)
+	mux.HandleFunc("/completion", server.completion)
+
 	httpServer := http.Server{
-		Handler: http.HandlerFunc(server.handler),
+		Handler: mux,
 	}
 
 	log.Println("Server listening on", addr)