
llm: remove ambiguous comment when putting upper limit on predictions to avoid infinite generation (#5535)

Jeffrey Morgan 9 months ago
parent
commit
53da2c6965
1 changed file with 1 addition and 2 deletions
  1. llm/server.go  +1 -2

+1 -2
llm/server.go

@@ -699,10 +699,9 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
 	}
 	defer s.sem.Release(1)
 
-	// only allow maximum 10 "context shifts" to avoid infinite generation
+	// put an upper limit on num_predict to avoid the model running on forever
 	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
 		req.Options.NumPredict = 10 * s.options.NumCtx
-		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
 	}
 
 	request := map[string]any{
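
For reference, a minimal standalone sketch of the capping behavior this commit keeps (only the comment and debug log changed). The clampNumPredict helper below is hypothetical, written for illustration; in the actual server code the check is inlined in (*llmServer).Completion exactly as shown in the diff above.

	package main

	import "fmt"

	// clampNumPredict caps the number of tokens a request may generate
	// at 10x the context window, so generation always terminates.
	// A negative numPredict means "unlimited" in the request options;
	// both that case and any value above the cap collapse to 10*numCtx.
	func clampNumPredict(numPredict, numCtx int) int {
		if numPredict < 0 || numPredict > 10*numCtx {
			return 10 * numCtx
		}
		return numPredict
	}

	func main() {
		numCtx := 2048 // example context window size

		fmt.Println(clampNumPredict(-1, numCtx))    // unlimited  -> 20480
		fmt.Println(clampNumPredict(100, numCtx))   // within cap -> 100
		fmt.Println(clampNumPredict(50000, numCtx)) // above cap  -> 20480
	}

Clamping rather than rejecting oversized values keeps long but bounded requests working while still guaranteeing the model cannot run on forever.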