01-load-progress.diff 1.4 KB

  1. diff --git a/common/common.cpp b/common/common.cpp
  2. index 73ff0e85..6adb1a92 100644
  3. --- a/common/common.cpp
  4. +++ b/common/common.cpp
  5. @@ -2447,6 +2447,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
  6. mparams.use_mmap = params.use_mmap;
  7. mparams.use_mlock = params.use_mlock;
  8. mparams.check_tensors = params.check_tensors;
  9. + mparams.progress_callback = params.progress_callback;
  10. + mparams.progress_callback_user_data = params.progress_callback_user_data;
  11. if (params.kv_overrides.empty()) {
  12. mparams.kv_overrides = NULL;
  13. } else {
  14. diff --git a/common/common.h b/common/common.h
  15. index 58ed72f4..0bb2605e 100644
  16. --- a/common/common.h
  17. +++ b/common/common.h
  18. @@ -180,6 +180,13 @@ struct gpt_params {
  19. std::string mmproj = ""; // path to multimodal projector
  20. std::vector<std::string> image; // path to image file(s)
  21. + // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
  22. + // If the provided progress_callback returns true, model loading continues.
  23. + // If it returns false, model loading is immediately aborted.
  24. + llama_progress_callback progress_callback = NULL;
  25. + // context pointer passed to the progress callback
  26. + void * progress_callback_user_data = NULL;
  27. +
  28. // server params
  29. int32_t port = 8080; // server listens on this network port
  30. int32_t timeout_read = 600; // http read timeout in seconds