
server: Fix double free on runner subprocess error.

If the runner subprocess encounters an error, it will close the HTTP
connection, which causes Ollama to free the instance of the model that
it has open. When Ollama exits, it will again try to free the models
for all of the runners that were open, resulting in a double free.
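
The failure mode is a double free on a shared handle: Close() can be
reached once from the connection-error path and again during shutdown.
Below is a minimal, self-contained Go sketch of why clearing the pointer
after the first free makes a second Close() harmless; the model type and
freeModel function here are simplified stand-ins, not Ollama's real cgo
bindings. The actual two-line patch follows below.

package main

import "fmt"

// model and freeModel are simplified stand-ins for the cgo-backed model
// handle and its release call; they are not Ollama's real definitions.
type model struct{ name string }

func freeModel(m *model) { fmt.Println("freeing model:", m.name) }

type llmServer struct {
	model *model
}

// Close may run twice: once when the runner's HTTP connection drops and
// again when Ollama shuts down every runner. Nil-ing the pointer after
// the first free turns the second call into a no-op instead of a double
// free.
func (s *llmServer) Close() error {
	if s.model != nil {
		freeModel(s.model)
		s.model = nil
	}
	return nil
}

func main() {
	s := &llmServer{model: &model{name: "llama"}}
	s.Close() // first call frees the model
	s.Close() // second call is now a safe no-op
}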
Jesse Gross · 8 months ago · commit ebdf781397
1 changed file with 2 additions and 0 deletions:

llm/server.go  (+2, −0)

@@ -978,6 +978,7 @@ func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error
 func (s *llmServer) Close() error {
 	if s.model != nil {
 		freeModel(s.model)
+		s.model = nil
 	}
 	if s.cmd != nil {
 		slog.Debug("stopping llama server")
@@ -989,6 +990,7 @@ func (s *llmServer) Close() error {
 			slog.Debug("waiting for llama server to exit")
 			<-s.done
 		}
+		s.cmd = nil
 
 		slog.Debug("llama server stopped")
 	}