瀏覽代碼

fix the cpu estimatedTotal memory + get the expiry time for loading models

Patrick Devine 11 月之前
父節點
當前提交
b73a512f24
共有 2 個文件被更改,包括 9 次插入、0 次刪除
  1. 1 0
      llm/server.go
  2. 8 0
      server/routes.go

+ 1 - 0
llm/server.go

@@ -89,6 +89,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 
 		cpuRunner = serverForCpu()
 		gpuCount = 0
+		_, _, estimatedTotal = EstimateGPULayers(gpus, ggml, projectors, opts)
 	} else {
 		if gpus[0].Library == "metal" {
 			memInfo, err := gpu.GetCPUMem()

+ 8 - 0
server/routes.go

@@ -1161,6 +1161,14 @@ func (s *Server) ProcessHandler(c *gin.Context) {
 			Details:   modelDetails,
 			ExpiresAt: v.expiresAt,
 		}
+		// The scheduler waits to set expiresAt, so if a model is loading it's
+		// possible that it will be set to the unix epoch. For those cases, just
+		// calculate the time w/ the sessionDuration instead.
+		var epoch time.Time
+		if v.expiresAt == epoch {
+			mr.ExpiresAt = time.Now().Add(v.sessionDuration)
+		}
+
 		models = append(models, mr)
 	}