Browse Source

runner.go: Don't try to extract image tags for text models

When processing a prompt, we look for image tags of the form
[img-0], which are inserted by the Ollama server process.
However, this can cause errors if the original prompt has these
tags - typically an image not found error is returned.

This changes tag searching behavior to be similar to the 0.3.x
series, which will largely avoid these problems. However, they can
still happen when input text with these tags is used with image
models. The correct solution is to escape the tags, but this is a
larger issue with special sequences in general, so this is an
incremental fix that should avoid the problem for the majority
of cases.
Jesse Gross 5 months ago
parent
commit
71e6a0d0d1
1 changed files with 9 additions and 3 deletions
  1. 9 3
      llama/runner/runner.go

+ 9 - 3
llama/runner/runner.go

@@ -164,10 +164,16 @@ func (s *Server) NewSequence(prompt string, images []ImageData, params NewSequen
 // generating image embeddings for each image
 func (s *Server) inputs(prompt string, images []ImageData) ([]input, error) {
 	var inputs []input
+	var parts []string
+	var matches [][]string
 
-	re := regexp.MustCompile(`\[img-(\d+)\]`)
-	parts := re.Split(prompt, -1)
-	matches := re.FindAllStringSubmatch(prompt, -1)
+	if s.image != nil {
+		re := regexp.MustCompile(`\[img-(\d+)\]`)
+		parts = re.Split(prompt, -1)
+		matches = re.FindAllStringSubmatch(prompt, -1)
+	} else {
+		parts = []string{prompt}
+	}
 
 	for i, part := range parts {
 		// text - tokenize