0001-remove-warm-up-logging.patch 804 B

From 07993bdc35345b67b27aa649a7c099ad42d80c4c Mon Sep 17 00:00:00 2001
From: Michael Yang <mxyng@pm.me>
Date: Thu, 21 Sep 2023 14:43:21 -0700
Subject: [PATCH] remove warm up logging

---
 common/common.cpp | 2 --
 1 file changed, 2 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 2597ba0..b56549b 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -780,8 +780,6 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
     }
 
     {
-        LOG("warming up the model with an empty run\n");
-
         const std::vector<llama_token> tmp = { llama_token_bos(lctx), llama_token_eos(lctx), };
         llama_eval(lctx, tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, params.n_threads);
         llama_reset_timings(lctx);
-- 
2.42.0
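
For context, this is roughly how the warm-up block in llama_init_from_gpt_params reads once the patch is applied. It is a sketch reconstructed from the hunk context above, not a complete or buildable excerpt of common.cpp:

    {
        // Warm up the model with a minimal eval over a BOS + EOS token pair;
        // the LOG() call that announced this step is what the patch removes.
        const std::vector<llama_token> tmp = { llama_token_bos(lctx), llama_token_eos(lctx), };
        llama_eval(lctx, tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, params.n_threads);
        // Clear timing counters so the warm-up run is not counted in reported timings.
        llama_reset_timings(lctx);
    }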