quantize.diff

commit c260daa84166c568cd998410dc9ba5628c530bee
Author: Josh Yan <jyan00017@gmail.com>
Date:   Tue Jul 9 15:34:24 2024 -0700

    quantize progress

diff --git a/llama.cpp b/llama.cpp
index 61948751..c06d31b6 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -15370,7 +15370,7 @@ static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const floa
     return new_size;
 }
 
-static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
+static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, llama_model_quantize_params * params) {
     ggml_type default_type;
     llama_ftype ftype = params->ftype;
 
@@ -15586,6 +15586,15 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     const auto tn = LLM_TN(model.arch);
     new_ofstream(0);
     for (int i = 0; i < ml.n_tensors; ++i) {
+
+        if (params->quantize_callback){
+            if (!params->quantize_callback(i/ml.n_tensors, params->quantize_callback_data)) {
+                close_ofstream();
+                params->quantize_callback_data = nullptr;
+                return;
+            }
+        }
+
         auto weight = ml.get_weight(i);
         struct ggml_tensor * tensor = weight->tensor;
         if (weight->idx != cur_split && params->keep_split) {
@@ -16119,6 +16128,8 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
         /*.keep_split               =*/ false,
         /*.imatrix                  =*/ nullptr,
         /*.kv_overrides             =*/ nullptr,
+        /*.quantize_callback        =*/ nullptr,
+        /*.quantize_callback_data   =*/ nullptr,
     };
 
     return result;
@@ -16784,7 +16795,7 @@ struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const ch
 uint32_t llama_model_quantize(
         const char * fname_inp,
         const char * fname_out,
-        const llama_model_quantize_params * params) {
+        llama_model_quantize_params * params) {
     try {
         llama_model_quantize_internal(fname_inp, fname_out, params);
         return 0;
diff --git a/llama.h b/llama.h
index da310ffa..847c40d4 100644
--- a/llama.h
+++ b/llama.h
@@ -196,6 +196,8 @@ extern "C" {
     typedef bool (*llama_progress_callback)(float progress, void * user_data);
 
+    typedef bool (*llama_quantize_callback)(int progress, void * user_data);
+
     // Input data for llama_decode
     // A llama_batch object can contain input about one or many sequences
     // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens
@@ -337,6 +339,9 @@ extern "C" {
         bool keep_split;                 // quantize to the same number of shards
         void * imatrix;                  // pointer to importance matrix data
         void * kv_overrides;             // pointer to vector containing overrides
+
+        llama_quantize_callback quantize_callback;
+        void * quantize_callback_data;
     } llama_model_quantize_params;
 
     // grammar types
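
The patch threads a cancellable progress callback through llama_model_quantize(): the params struct gains quantize_callback and quantize_callback_data, the callback fires once per tensor at the top of the quantization loop, and returning false closes the output stream and aborts early. One caveat visible in the hunk above: i/ml.n_tensors divides two integers, so the int progress argument is 0 on every call; reporting a percentage would require scaling on the library side (e.g. 100 * i / ml.n_tensors) or a float, as llama_progress_callback uses. Below is a minimal caller sketch against the post-patch header; the file names, ftype choice, and on_quantize_progress helper are illustrative assumptions, not part of the commit.

/* Caller sketch (assumes the post-patch llama.h; file names and
 * ftype are hypothetical, not taken from the commit). */
#include <stdbool.h>
#include <stdio.h>

#include "llama.h"

// Runs once per tensor. Return false to cancel: the patch then calls
// close_ofstream(), clears quantize_callback_data, and returns early.
static bool on_quantize_progress(int progress, void * user_data) {
    const bool * cancelled = (const bool *) user_data;
    fprintf(stderr, "\rquantize progress: %d", progress);
    return !*cancelled;
}

int main(void) {
    bool cancelled = false;

    llama_model_quantize_params params = llama_model_quantize_default_params();
    params.ftype                  = LLAMA_FTYPE_MOSTLY_Q4_K_M;
    params.quantize_callback      = on_quantize_progress;
    params.quantize_callback_data = &cancelled;

    // Returns 0 on success, non-zero on failure.
    return (int) llama_model_quantize("model-f16.gguf", "model-q4_k_m.gguf", &params);
}

Because the callback can veto continuation, a host application can flip the flag passed through quantize_callback_data (e.g. from a UI thread) to stop a long-running quantization without killing the process.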