
quantize progress

Josh Yan 9 months ago
parent
commit
a083852eb5
5 changed files with 109 additions and 3 deletions
  1. api/types.go (+1, -0)
  2. cmd/cmd.go (+10, -0)
  3. llm/llm.go (+44, -2)
  4. llm/patches/10-quantize-callback.diff (+52, -0)
  5. server/images.go (+2, -1)

+ 1 - 0
api/types.go

@@ -267,6 +267,7 @@ type PullRequest struct {
 type ProgressResponse struct {
 	Status    string `json:"status"`
 	Digest    string `json:"digest,omitempty"`
+	Quantize  string `json:"quantize,omitempty"`
 	Total     int64  `json:"total,omitempty"`
 	Completed int64  `json:"completed,omitempty"`
 }
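
The new Quantize field lets a streaming client distinguish quantization updates from blob-transfer updates (which carry Digest) within the same progress stream. A quick sketch of the resulting wire format; the status text and tensor counts here are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ollama/ollama/api"
)

func main() {
	// Only non-empty fields survive marshaling, thanks to omitempty.
	b, _ := json.Marshal(api.ProgressResponse{
		Status:   "quantizing model 12/291",
		Quantize: "quant",
	})
	fmt.Println(string(b))
	// Output: {"status":"quantizing model 12/291","quantize":"quant"}
}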

+ 10 - 0
cmd/cmd.go

@@ -125,6 +125,7 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
 	}
 
 	bars := make(map[string]*progress.Bar)
+	var quantizeSpin *progress.Spinner
 	fn := func(resp api.ProgressResponse) error {
 		if resp.Digest != "" {
 			spinner.Stop()
@@ -137,6 +138,15 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
 			}
 
 			bar.Set(resp.Completed)
+		} else if resp.Quantize != "" {
+			spinner.Stop()
+
+			if quantizeSpin != nil {
+				quantizeSpin.SetMessage(resp.Status)
+			} else {
+				quantizeSpin = progress.NewSpinner(resp.Status)
+				p.Add("quantize", quantizeSpin)
+			}
 		} else if status != resp.Status {
 			spinner.Stop()
 
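
The nil check makes the quantize spinner update-or-create: the first quantize event allocates the spinner and registers it under the "quantize" key, while every later event only rewrites its message, so a stream of progress updates never stacks duplicate spinners. The same pattern in isolation (a sketch; the concrete type of p is not visible in this hunk and is assumed to be *progress.Progress):

var quantizeSpin *progress.Spinner

func onQuantizeStatus(p *progress.Progress, status string) {
	if quantizeSpin != nil {
		quantizeSpin.SetMessage(status) // later events: update in place
		return
	}
	quantizeSpin = progress.NewSpinner(status) // first event: create once
	p.Add("quantize", quantizeSpin)
}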

+ 44 - 2
llm/llm.go

@@ -10,10 +10,17 @@ package llm
 // #cgo linux,arm64 LDFLAGS: -L${SRCDIR}/build/linux/arm64_static -L${SRCDIR}/build/linux/arm64_static/src -L${SRCDIR}/build/linux/arm64_static/ggml/src
 // #include <stdlib.h>
 // #include "llama.h"
+// bool update_quantize_progress(float progress, void* data) {
+//     *((float*)data) = progress;
+//     return true;
+// }
 import "C"
 import (
 	"fmt"
 	"unsafe"
+	"time"
+
+	"github.com/ollama/ollama/api"
 )
 
 // SystemInfo is an unused example of calling llama.cpp functions using CGo
@@ -21,17 +28,52 @@ func SystemInfo() string {
 	return C.GoString(C.llama_print_system_info())
 }
 
-func Quantize(infile, outfile string, ftype fileType) error {
+func Quantize(infile, outfile string, ftype fileType, fn func(resp api.ProgressResponse), tensorCount int) error {
 	cinfile := C.CString(infile)
 	defer C.free(unsafe.Pointer(cinfile))
 
 	coutfile := C.CString(outfile)
 	defer C.free(unsafe.Pointer(coutfile))
 
 	params := C.llama_model_quantize_default_params()
 	params.nthread = -1
 	params.ftype = ftype.Value()
 
+	// Allocate C memory to hold the progress value; the C callback writes
+	// the current tensor index here and the goroutine below polls it
+	store := C.malloc(C.sizeof_float)
+	defer C.free(unsafe.Pointer(store))
+
+	// Start the stored progress at 0
+	*(*C.float)(store) = 0.0
+
+	params.quantize_callback_data = store
+	params.quantize_callback = (C.llama_progress_callback)(C.update_quantize_progress)
+
+	ticker := time.NewTicker(60 * time.Millisecond)
+	done := make(chan struct{})
+	defer close(done)
+
+	go func() {
+		defer ticker.Stop()
+		for {
+			select {
+			case <-ticker.C:
+				fn(api.ProgressResponse{
+					Status:   fmt.Sprintf("quantizing model %d/%d", int(*((*C.float)(store))), tensorCount),
+					Quantize: "quant",
+				})
+			case <-done:
+				fn(api.ProgressResponse{
+					Status:   fmt.Sprintf("quantizing model %d/%d", tensorCount, tensorCount),
+					Quantize: "quant",
+				})
+				return
+			}
+		}
+	}()
+
 	if rc := C.llama_model_quantize(cinfile, coutfile, &params); rc != 0 {
 		return fmt.Errorf("llama_model_quantize: %d", rc)
 	}
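
Putting the pieces together: llama.cpp calls update_quantize_progress once per tensor with the tensor index as its float argument, the C shim stores that index in the malloc'd float, and a Go goroutine polls the value every 60ms and forwards it through fn; the deferred close(done) fires when Quantize returns, emitting the final tensorCount/tensorCount message. Below is a self-contained sketch of the same polling pattern with llama.cpp stubbed out; fake_quantize, write_progress, and the 20ms delay are invented for illustration. Like the commit, it reads the shared float while C writes it, without synchronization; the race is tolerated because a stale read only delays a status line.

package main

/*
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>

static bool write_progress(float progress, void *data) {
	*((float *)data) = progress;
	return true; // returning false would ask the caller to stop early
}

// Stand-in for llama_model_quantize's per-tensor loop.
static void fake_quantize(int n_tensors, void *data) {
	for (int i = 0; i < n_tensors; i++) {
		if (!write_progress((float)i, data)) {
			return;
		}
		usleep(20000); // 20ms of pretend work per tensor
	}
}
*/
import "C"

import (
	"fmt"
	"time"
	"unsafe"
)

func main() {
	const tensorCount = 10

	// Progress lives in C memory so the C side can write it directly.
	store := C.malloc(C.sizeof_float)
	defer C.free(unsafe.Pointer(store))
	*(*C.float)(store) = 0.0

	done := make(chan struct{})
	finished := make(chan struct{})

	go func() {
		defer close(finished)
		ticker := time.NewTicker(60 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				fmt.Printf("quantizing model %d/%d\n", int(*(*C.float)(store)), tensorCount)
			case <-done:
				fmt.Printf("quantizing model %d/%d\n", tensorCount, tensorCount)
				return
			}
		}
	}()

	C.fake_quantize(C.int(tensorCount), store) // blocks, like llama_model_quantize
	close(done)
	<-finished // let the goroutine print the final count
}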

+ 52 - 0
llm/patches/10-quantize-callback.diff

@@ -0,0 +1,52 @@
+From fa509abf281177eacdc71a2a14432c4e6ed74a47 Mon Sep 17 00:00:00 2001
+From: Josh Yan <jyan00017@gmail.com>
+Date: Wed, 10 Jul 2024 12:58:31 -0700
+Subject: [PATCH] quantize callback
+
+---
+ llama.cpp | 8 ++++++++
+ llama.h   | 3 +++
+ 2 files changed, 11 insertions(+)
+
+diff --git a/llama.cpp b/llama.cpp
+index 61948751..d3126510 100644
+--- a/llama.cpp
++++ b/llama.cpp
+@@ -15586,6 +15586,12 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
+     const auto tn = LLM_TN(model.arch);
+     new_ofstream(0);
+     for (int i = 0; i < ml.n_tensors; ++i) {
++        if (params->quantize_callback){
++            if (!params->quantize_callback(i, params->quantize_callback_data)) {
++                return;
++            }
++        }
++
+         auto weight = ml.get_weight(i);
+         struct ggml_tensor * tensor = weight->tensor;
+         if (weight->idx != cur_split && params->keep_split) {
+@@ -16119,6 +16125,8 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
+         /*.keep_split                  =*/ false,
+         /*.imatrix                     =*/ nullptr,
+         /*.kv_overrides                =*/ nullptr,
++        /*.quantize_callback           =*/ nullptr,
++        /*.quantize_callback_data      =*/ nullptr,
+     };
+
+     return result;
+diff --git a/llama.h b/llama.h
+index da310ffa..3cbe6023 100644
+--- a/llama.h
++++ b/llama.h
+@@ -337,6 +337,9 @@ extern "C" {
+         bool keep_split;                     // quantize to the same number of shards
+         void * imatrix;                      // pointer to importance matrix data
+         void * kv_overrides;                 // pointer to vector containing overrides
++
++        llama_progress_callback quantize_callback;   // callback to report quantization progress
++        void * quantize_callback_data;               // user data for the callback
+     } llama_model_quantize_params;
+
+     // grammar types
+-- 
+2.39.3 (Apple Git-146)
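
Rather than defining a new type, the patch reuses llama_progress_callback, which llama.h already declares as a bool-returning callback taking a float and a user-data pointer. One consequence worth noting: if the callback returns false, the patched loop in llama_model_quantize_internal returns early without raising an error, leaving the output file incomplete while the caller still sees a zero return code. The callback installed by llm.go always returns true, so that path is never taken here, but it would support cancellation. A hypothetical cancel-aware shim in the same CGo style as llm.go (not part of this commit):

package llm

// #include <stdbool.h>
// bool cancellable_quantize_progress(float progress, void* data) {
//     float *store = (float*)data;
//     if (*store < 0) {
//         return false; // patched loop returns early, abandoning the file
//     }
//     *store = progress;
//     return true;
// }
import "C"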

+ 2 - 1
server/images.go

@@ -423,6 +423,7 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio
 						return err
 					}
 
+					tensorCount := len(baseLayer.GGML.Tensors())
 					ft := baseLayer.GGML.KV().FileType()
 					if !slices.Contains([]string{"F16", "F32"}, ft.String()) {
 						return errors.New("quantization is only supported for F16 and F32 models")
@@ -441,7 +442,7 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio
 						defer temp.Close()
 						defer os.Remove(temp.Name())
 
-						if err := llm.Quantize(blob, temp.Name(), want); err != nil {
+						if err := llm.Quantize(blob, temp.Name(), want, fn, tensorCount); err != nil {
 							return err
 						}
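
With the tensor count read from the parsed GGML metadata, the server threads its existing progress function straight through to the quantizer. A minimal caller-side sketch of the new signature, with blob, temp, and want as in the surrounding code and the logging callback invented for illustration:

tensorCount := len(baseLayer.GGML.Tensors())
if err := llm.Quantize(blob, temp.Name(), want, func(resp api.ProgressResponse) {
	log.Println(resp.Status) // e.g. "quantizing model 12/291"
}, tensorCount); err != nil {
	return err
}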