Prechádzať zdrojové kódy

remove dependency on `llm`

jmorganca pred 11 mesiacmi
rodič
commit
1a801fba2a
zmenil 3 súbory, v ktorých vykonal 3 pridania a 6 odobraní
  1. 2 4
      llama/llama.go
  2. 0 1
      llama/runner/runner.go
  3. 1 1
      llama/sampling_ext.cpp

+ 2 - 4
llama/llama.go

@@ -35,8 +35,6 @@ import (
 	"runtime"
 	"strings"
 	"unsafe"
-
-	"github.com/ollama/ollama/llm"
 )
 
 func BackendInit() {
@@ -227,7 +225,7 @@ func (m *Model) Tokenize(text string, maxTokens int, addSpecial bool, parseSpeci
 	return tokens, nil
 }
 
-func Quantize(infile, outfile string, ftype llm.FileType) error {
+func Quantize(infile, outfile string, ftype uint32) error {
 	cinfile := C.CString(infile)
 	defer C.free(unsafe.Pointer(cinfile))
 
@@ -236,7 +234,7 @@ func Quantize(infile, outfile string, ftype llm.FileType) error {
 
 	params := C.llama_model_quantize_default_params()
 	params.nthread = -1
-	params.ftype = ftype.Value()
+	params.ftype = ftype
 
 	if rc := C.llama_model_quantize(cinfile, coutfile, &params); rc != 0 {
 		return fmt.Errorf("llama_model_quantize: %d", rc)

+ 0 - 1
llama/runner/runner.go

@@ -209,7 +209,6 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
 	for i, sq := range s.seqs {
 		if sq == nil {
 			s.seqs[i] = seq
-			fmt.Println("signal")
 			s.cond.Signal()
 			break
 		}

+ 1 - 1
llama/sampling_ext.cpp

@@ -17,7 +17,7 @@ struct llama_sampling_context* llama_sampling_cinit(struct llama_sampling_cparam
     sparams.mirostat_eta = params->mirostat_eta;
     sparams.penalize_nl = params->penalize_nl;
     sparams.seed = params->seed;
-    sparams.grammar = std::string(params->grammar);
+    sparams.grammar = params->grammar;
     return llama_sampling_init(sparams);
 }