
fix: parallel queueing race condition caused silent failure (#1445)

* fix: queued request failures

- increase parallel requests to 2 so queued requests can complete; queueing is managed in ollama

* log stream errors
Bruce MacDonald 1 year ago
parent
commit
bbe41ce41a
1 changed file with 30 additions and 26 deletions

+ 30 - 26
llm/llama.go

@@ -341,6 +341,7 @@ func newLlama(model string, adapters, projectors []string, runners []ModelRunner
 		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
 		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
 		"--n-gpu-layers", fmt.Sprintf("%d", numGPU),
+		"--parallel", "2",
 		"--embedding",
 	}
 
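For context, `--parallel 2` gives the llama.cpp server two request slots instead of one, so the request Ollama has queued can start while the previous one finishes; Ollama itself still manages the queue. As a rough illustration only (not code from this commit, all names made up), the slot pool behaves like a buffered-channel semaphore:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	// Hypothetical model of the server's request slots: capacity 2
	// mirrors "--parallel", "2". With capacity 1, a queued request
	// cannot start until the active one fully releases its slot.
	slots := make(chan struct{}, 2)

	var wg sync.WaitGroup
	for i := 1; i <= 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			slots <- struct{}{}        // acquire a slot (blocks while full)
			defer func() { <-slots }() // release the slot on completion
			fmt.Printf("request %d holds a slot\n", id)
			time.Sleep(50 * time.Millisecond)
		}(i)
	}
	wg.Wait()
}
```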
@@ -631,34 +632,37 @@ func (llm *llama) Predict(ctx context.Context, predict PredictOpts, fn func(Pred
 				continue
 			}
 
-			if evt, ok := bytes.CutPrefix(line, []byte("data: ")); ok {
-				var p prediction
-				if err := json.Unmarshal(evt, &p); err != nil {
-					return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
-				}
+			evt, ok := bytes.CutPrefix(line, []byte("data: "))
+			if !ok {
+				return fmt.Errorf("error parsing llm response stream: %s", line)
+			}
 
-				if p.Content != "" {
-					fn(PredictResult{
-						Model:     predict.Model,
-						CreatedAt: time.Now().UTC(),
-						Content:   p.Content,
-					})
-				}
+			var p prediction
+			if err := json.Unmarshal(evt, &p); err != nil {
+				return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
+			}
 
-				if p.Stop {
-					fn(PredictResult{
-						Model:         predict.Model,
-						CreatedAt:     time.Now().UTC(),
-						TotalDuration: time.Since(predict.CheckpointStart),
-
-						Done:               true,
-						PromptEvalCount:    p.Timings.PromptN,
-						PromptEvalDuration: parseDurationMs(p.Timings.PromptMS),
-						EvalCount:          p.Timings.PredictedN,
-						EvalDuration:       parseDurationMs(p.Timings.PredictedMS),
-					})
-					return nil
-				}
+			if p.Content != "" {
+				fn(PredictResult{
+					Model:     predict.Model,
+					CreatedAt: time.Now().UTC(),
+					Content:   p.Content,
+				})
+			}
+
+			if p.Stop {
+				fn(PredictResult{
+					Model:         predict.Model,
+					CreatedAt:     time.Now().UTC(),
+					TotalDuration: time.Since(predict.CheckpointStart),
+
+					Done:               true,
+					PromptEvalCount:    p.Timings.PromptN,
+					PromptEvalDuration: parseDurationMs(p.Timings.PromptMS),
+					EvalCount:          p.Timings.PredictedN,
+					EvalDuration:       parseDurationMs(p.Timings.PredictedMS),
+				})
+				return nil
 			}
 		}
 	}
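The second hunk flattens the nested `if evt, ok := ...; ok { ... }` into early returns, and, per the "log stream errors" note in the commit message, a line without the SSE `data: ` prefix now surfaces as an error instead of being skipped silently. Below is a self-contained sketch of the same parsing pattern, with a trimmed-down `prediction` type standing in for the real struct in llm/llama.go:

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"strings"
)

// prediction is a simplified stand-in for the struct in llm/llama.go.
type prediction struct {
	Content string `json:"content"`
	Stop    bool   `json:"stop"`
}

func main() {
	stream := "data: {\"content\":\"hello\"}\ndata: {\"content\":\"\",\"stop\":true}\n"
	scanner := bufio.NewScanner(strings.NewReader(stream))
	for scanner.Scan() {
		line := scanner.Bytes()
		if len(bytes.TrimSpace(line)) == 0 {
			continue // skip keep-alive blank lines
		}
		// Early-return style from the commit: anything without the SSE
		// "data: " prefix is now an error rather than being skipped.
		evt, ok := bytes.CutPrefix(line, []byte("data: "))
		if !ok {
			fmt.Printf("error parsing llm response stream: %s\n", line)
			return
		}
		var p prediction
		if err := json.Unmarshal(evt, &p); err != nil {
			fmt.Printf("error unmarshaling llm prediction response: %v\n", err)
			return
		}
		if p.Content != "" {
			fmt.Println("token:", p.Content)
		}
		if p.Stop {
			fmt.Println("done")
			return
		}
	}
}
```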