10-quantize-callback.diff

From fa509abf281177eacdc71a2a14432c4e6ed74a47 Mon Sep 17 00:00:00 2001
From: Josh Yan <jyan00017@gmail.com>
Date: Wed, 10 Jul 2024 12:58:31 -0700
Subject: [PATCH] quantize callback

---
 llama.cpp | 8 ++++++++
 llama.h   | 3 +++
 2 files changed, 11 insertions(+)

diff --git a/llama.cpp b/llama.cpp
index 61948751..d3126510 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -15586,6 +15586,12 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     const auto tn = LLM_TN(model.arch);
     new_ofstream(0);
     for (int i = 0; i < ml.n_tensors; ++i) {
+        if (params->quantize_callback) {
+            if (!params->quantize_callback(i, params->quantize_callback_data)) {
+                return;
+            }
+        }
+
         auto weight = ml.get_weight(i);
         struct ggml_tensor * tensor = weight->tensor;
         if (weight->idx != cur_split && params->keep_split) {
@@ -16119,6 +16125,8 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
         /*.keep_split             =*/ false,
         /*.imatrix                =*/ nullptr,
         /*.kv_overrides           =*/ nullptr,
+        /*.quantize_callback      =*/ nullptr,
+        /*.quantize_callback_data =*/ nullptr,
     };

     return result;
diff --git a/llama.h b/llama.h
index da310ffa..3cbe6023 100644
--- a/llama.h
+++ b/llama.h
@@ -337,6 +337,9 @@ extern "C" {
         bool keep_split;      // quantize to the same number of shards
         void * imatrix;       // pointer to importance matrix data
         void * kv_overrides;  // pointer to vector containing overrides
+
+        llama_progress_callback quantize_callback;  // callback to report quantization progress
+        void * quantize_callback_data;              // user data for the callback
     } llama_model_quantize_params;

     // grammar types
--
2.39.3 (Apple Git-146)
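
For reference, a minimal sketch of how a caller might use the two new fields. It assumes the patched llama.h is on the include path, that llama_progress_callback keeps its upstream signature bool (*)(float progress, void * user_data), and that the model paths and the LLAMA_FTYPE_MOSTLY_Q4_K_M target type are illustrative placeholders.

#include <stdio.h>
#include "llama.h"

// Invoked once per tensor with the tensor index (passed as a float);
// returning false makes the quantization loop return early.
static bool on_quantize_progress(float n_done, void * user_data) {
    (void) user_data; // no user data needed in this sketch
    fprintf(stderr, "quantized %d tensors\n", (int) n_done);
    return true; // keep going
}

int main(void) {
    llama_model_quantize_params params = llama_model_quantize_default_params();
    params.ftype                  = LLAMA_FTYPE_MOSTLY_Q4_K_M; // illustrative target type
    params.quantize_callback      = on_quantize_progress;
    params.quantize_callback_data = NULL;
    // Placeholder paths; llama_model_quantize returns 0 on success.
    return (int) llama_model_quantize("model.gguf", "model-q4_k_m.gguf", &params);
}

Note that the callback fires before each tensor is processed, so a false return aborts cleanly between tensors rather than mid-write of a single tensor.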