
llm: auto detect models that require Ollama Engine (#1)

Daniel Hiltgen 1 month ago
Parent
Commit
ab39e08eb9
2 changed files with 5 additions and 1 deletion
  1. fs/ggml/ggml.go (+4 -0)
  2. llm/server.go (+1 -1)

fs/ggml/ggml.go (+4 -0)

@@ -133,6 +133,10 @@ func (kv KV) Floats(key string, defaultValue ...[]float32) []float32 {
 	return s
 }
 
+func (kv KV) OllamaEngineRequired() bool {
+	return kv.Architecture() == "gemma3"
+}
+
 func keyValue[T string | uint32 | uint64 | float32 | *array | bool](kv KV, key string, defaultValue ...T) T {
 	if !strings.HasPrefix(key, "tokenizer.") && !strings.HasPrefix(key, "general.") {
 		key = kv.Architecture() + "." + key
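
The new helper keys the engine decision off the model's architecture string recorded in the GGUF metadata. Here is a minimal, runnable sketch of how the check behaves, assuming KV is the map-backed key/value type from fs/ggml with the architecture stored under general.architecture (the gemma3 value is taken from the diff above; the map layout and the "unknown" fallback are illustrative assumptions, not the package's exact API):

package main

import "fmt"

// KV stands in for the map-backed key/value type in fs/ggml
// (an assumption for this sketch; the real type has more accessors).
type KV map[string]any

// Architecture reads the model architecture from the metadata,
// defaulting to "unknown" when the key is absent.
func (kv KV) Architecture() string {
	if s, ok := kv["general.architecture"].(string); ok {
		return s
	}
	return "unknown"
}

// OllamaEngineRequired mirrors the helper added in this commit:
// gemma3 models can only run on the new Ollama engine.
func (kv KV) OllamaEngineRequired() bool {
	return kv.Architecture() == "gemma3"
}

func main() {
	for _, kv := range []KV{
		{"general.architecture": "gemma3"}, // forced onto the new engine
		{"general.architecture": "llama"},  // still eligible for the old runner
	} {
		fmt.Printf("%s -> new engine required: %v\n", kv.Architecture(), kv.OllamaEngineRequired())
	}
}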

llm/server.go (+1 -1)

@@ -271,7 +271,7 @@ func NewLlamaServer(gpus discover.GpuInfoList, modelPath string, f *ggml.GGML, a
 
 	var llamaModel *llama.Model
 	var textProcessor model.TextProcessor
-	if envconfig.NewEngine() {
+	if envconfig.NewEngine() || f.KV().OllamaEngineRequired() {
 		textProcessor, err = model.NewTextProcessor(modelPath)
 		if err != nil {
 			// To prepare for opt-out mode, instead of treating this as an error, we fallback to the old runner
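
Taken together, the change routes a model onto the new engine either when the user opts in via envconfig.NewEngine() or when the architecture leaves no choice; if building the text processor fails, the code falls back to the old runner, as the in-tree comment notes. A condensed sketch of that decision shape, where loadTextProcessor and chooseRunner are hypothetical stand-ins for model.NewTextProcessor and the surrounding NewLlamaServer logic (not the real API):

package main

import (
	"errors"
	"fmt"
)

// loadTextProcessor is a placeholder for model.NewTextProcessor;
// the name and failure behavior here are illustrative only.
func loadTextProcessor(path string) (string, error) {
	if path == "" {
		return "", errors.New("no model")
	}
	return "processor for " + path, nil
}

// chooseRunner reproduces the shape of the changed condition: use the
// new engine when opted in or required by the model; on failure, fall
// back to the old llama runner, per the comment in the hunk above.
func chooseRunner(path string, optIn, required bool) string {
	if optIn || required {
		if tp, err := loadTextProcessor(path); err == nil {
			return "ollama engine (" + tp + ")"
		}
		// fallback: treat the failure as a signal to use the old runner
	}
	return "llama runner"
}

func main() {
	fmt.Println(chooseRunner("/models/gemma3.gguf", false, true))  // gemma3 forces the new engine
	fmt.Println(chooseRunner("/models/llama3.gguf", false, false)) // default stays on the old runner
}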