llm: limit generation to 10x context size to avoid run-on generations (#3918)

* llm: limit generation to 10x context size to avoid run-on generations

* add comment

* simplify condition statement
Jeffrey Morgan
parent commit: 993cf8bf55

2 changed files with 11 additions and 2 deletions:
  1. api/types.go (+4 -2)
  2. llm/server.go (+7 -0)

api/types.go (+4 -2)

@@ -396,8 +396,10 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
 func DefaultOptions() Options {
 	return Options{
 		// options set on request to runner
-		NumPredict:       -1,
-		NumKeep:          0,
+		NumPredict: -1,
+
+		// set a minimal num_keep to avoid issues on context shifts
+		NumKeep:          4,
 		Temperature:      0.8,
 		TopK:             40,
 		TopP:             0.9,
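
For context, num_keep controls how many tokens at the start of the prompt the runner preserves when the context window fills and older tokens must be evicted (a "context shift"). With num_keep at 0, a shift can evict the leading special tokens (e.g. the BOS token), which can degrade generation; keeping the first 4 tokens avoids that. Below is a minimal Go sketch of the idea, modeled on llama.cpp-style shifting where half of the evictable region is discarded; contextShift is an illustrative name, not ollama's actual runner code.

package main

import "fmt"

// contextShift is a minimal sketch of a context shift: keep the first
// numKeep tokens (e.g. BOS and leading prompt tokens) and discard half
// of the remaining, older tokens to make room for new generation.
// Modeled on llama.cpp's behavior; not ollama's actual runner code.
func contextShift(tokens []int, numKeep int) []int {
	if numKeep > len(tokens) {
		numKeep = len(tokens)
	}
	// drop half of the shiftable region, keeping the head intact
	discard := (len(tokens) - numKeep) / 2
	kept := make([]int, 0, len(tokens)-discard)
	kept = append(kept, tokens[:numKeep]...)
	return append(kept, tokens[numKeep+discard:]...)
}

func main() {
	window := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	fmt.Println(contextShift(window, 4)) // [1 2 3 4 8 9 10]
}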

llm/server.go (+7 -0)

@@ -560,6 +560,13 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
 		return err
 	}
 	defer s.sem.Release(1)
+
+	// only allow maximum 10 "context shifts" to avoid infinite generation
+	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
+		req.Options.NumPredict = 10 * s.options.NumCtx
+		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
+	}
+
 	request := map[string]any{
 		"prompt":            req.Prompt,
 		"stream":            true,