From ed941590d59fc07b1ad21d6aa458588e47d1e446 Mon Sep 17 00:00:00 2001
From: Josh Yan <jyan00017@gmail.com>
Date: Wed, 10 Jul 2024 13:39:39 -0700
Subject: [PATCH] quantize progress

---
 include/llama.h | 3 +++
 src/llama.cpp   | 8 ++++++++
 2 files changed, 11 insertions(+)

diff --git a/include/llama.h b/include/llama.h
index bb4b05ba..613db68e 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -349,6 +349,9 @@ extern "C" {
         bool keep_split;     // quantize to the same number of shards
         void * imatrix;      // pointer to importance matrix data
         void * kv_overrides; // pointer to vector containing overrides
+
+        llama_progress_callback quantize_callback; // callback to report quantization progress
+        void * quantize_callback_data;             // user data for the callback
     } llama_model_quantize_params;
 
     // grammar types
diff --git a/src/llama.cpp b/src/llama.cpp
index 2b9ace28..ac640c02 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -18252,6 +18252,12 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     const auto tn = LLM_TN(model.arch);
     new_ofstream(0);
     for (int i = 0; i < ml.n_tensors; ++i) {
+        if (params->quantize_callback) {
+            if (!params->quantize_callback(i, params->quantize_callback_data)) {
+                return;
+            }
+        }
+
         auto weight = ml.get_weight(i);
         struct ggml_tensor * tensor = weight->tensor;
         if (weight->idx != cur_split && params->keep_split) {
@@ -18789,6 +18795,8 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
         /*.keep_split             =*/ false,
         /*.imatrix                =*/ nullptr,
         /*.kv_overrides           =*/ nullptr,
+        /*.quantize_callback      =*/ nullptr,
+        /*.quantize_callback_data =*/ nullptr,
     };
 
     return result;
--
2.39.3 (Apple Git-146)
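
Usage note (not part of the patch itself): with these two fields in place, a caller
can observe per-tensor progress and cancel quantization early. The sketch below is a
minimal, hedged example, assuming quantize_callback reuses the upstream
llama_progress_callback signature, bool (*)(float progress, void * user_data). Per the
patched loop, the callback receives the raw tensor index i (not a 0..1 fraction), and
returning false makes llama_model_quantize_internal return early. The file names and
the Q4_K_M ftype are illustrative, not prescribed by the patch.

    #include "llama.h"
    #include <cstdio>
    #include <cstdint>

    // Progress callback: receives the current tensor index (as a float, per the
    // llama_progress_callback signature) and the quantize_callback_data pointer.
    static bool print_progress(float tensor_index, float * user_data_unused) = delete;
    static bool print_progress(float tensor_index, void * user_data) {
        (void) user_data;
        fprintf(stderr, "\rquantizing tensor %d", (int) tensor_index);
        return true; // return false here to cancel quantization early
    }

    int main() {
        llama_model_quantize_params params = llama_model_quantize_default_params();
        params.ftype                  = LLAMA_FTYPE_MOSTLY_Q4_K_M; // illustrative target type
        params.quantize_callback      = print_progress;
        params.quantize_callback_data = nullptr;

        // Quantize an f16 GGUF to Q4_K_M, reporting progress once per tensor.
        const uint32_t rc = llama_model_quantize("model-f16.gguf", "model-q4_k_m.gguf", &params);
        fprintf(stderr, "\n");
        return rc == 0 ? 0 : 1;
    }

One design consequence worth noting: because the callback fires once per tensor and
ml.n_tensors is known to the caller via the model, a UI can convert the index into a
percentage itself; the patch deliberately keeps the reported value raw.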