llm.go

package llm

// #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include
// #cgo LDFLAGS: -lllama -lggml -lstdc++ -lpthread
// #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal
// #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src
// #cgo windows,amd64 LDFLAGS: -static-libstdc++ -static-libgcc -static -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src
// #cgo windows,arm64 LDFLAGS: -static-libstdc++ -static-libgcc -static -L${SRCDIR}/build/windows/arm64_static -L${SRCDIR}/build/windows/arm64_static/src -L${SRCDIR}/build/windows/arm64_static/ggml/src
// #cgo linux,amd64 LDFLAGS: -L${SRCDIR}/build/linux/x86_64_static -L${SRCDIR}/build/linux/x86_64_static/src -L${SRCDIR}/build/linux/x86_64_static/ggml/src
// #cgo linux,arm64 LDFLAGS: -L${SRCDIR}/build/linux/arm64_static -L${SRCDIR}/build/linux/arm64_static/src -L${SRCDIR}/build/linux/arm64_static/ggml/src
// #include <stdlib.h>
// #include <stdatomic.h>
// #include "llama.h"
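//
// // update_quantize_progress is invoked by llama.cpp as quantization
// // progresses. It stores the float progress value bit-for-bit in an
// // atomic_int so the Go side can poll it without additional locking.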
// bool update_quantize_progress(float progress, void* data) {
//     atomic_int* atomicData = (atomic_int*)data;
//     int intProgress = *((int*)&progress);
//     atomic_store(atomicData, intProgress);
//     return true;
// }
import "C"

import (
	"errors"
	"fmt"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/ollama/ollama/api"
)

// SystemInfo is an unused example of calling llama.cpp functions using CGo.
func SystemInfo() string {
	return C.GoString(C.llama_print_system_info())
}
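
// Quantize converts the model at infile to the quantization type ftype and
// writes the result to outfile. Progress is reported through fn while the
// model's tensorCount tensors are processed.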
func Quantize(infile, outfile string, ftype fileType, fn func(resp api.ProgressResponse), tensorCount int) error {
	cinfile := C.CString(infile)
	defer C.free(unsafe.Pointer(cinfile))

	coutfile := C.CString(outfile)
	defer C.free(unsafe.Pointer(coutfile))

	params := C.llama_model_quantize_default_params()
	params.nthread = -1
	params.ftype = ftype.Value()

	// Allocate the shared progress value in C memory so llama.cpp can write
	// to it without violating cgo pointer-passing rules.
	store := (*int32)(C.malloc(C.sizeof_int))
	defer C.free(unsafe.Pointer(store))

	// Start with progress at 0.
	atomic.StoreInt32(store, 0)

	params.quantize_callback_data = unsafe.Pointer(store)
	params.quantize_callback = (C.llama_progress_callback)(C.update_quantize_progress)

	// Poll the shared progress value and report it to the caller until
	// quantization completes.
	ticker := time.NewTicker(30 * time.Millisecond)
	done := make(chan struct{})
	defer close(done)

	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				progressInt := atomic.LoadInt32(store)
				// Recover the float32 written bit-for-bit by the C callback.
				progress := *(*float32)(unsafe.Pointer(&progressInt))
				fn(api.ProgressResponse{
					Status: fmt.Sprintf("quantizing model tensors %d/%d", int(progress), tensorCount),
					Type:   "quantize",
				})
			case <-done:
				fn(api.ProgressResponse{
					Status: fmt.Sprintf("quantizing model tensors %d/%d", tensorCount, tensorCount),
					Type:   "quantize",
				})
				return
			}
		}
	}()

	if rc := C.llama_model_quantize(cinfile, coutfile, &params); rc != 0 {
		return errors.New("failed to quantize model. This model architecture may not be supported, or you may need to upgrade Ollama to the latest version")
	}

	return nil
}
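
// A minimal usage sketch (hypothetical: the file names, tensor count, and the
// fileTypeQ4_0 constant are illustrative assumptions, not values confirmed by
// this file):
//
//	if err := Quantize("model-f16.gguf", "model-q4_0.gguf", fileTypeQ4_0,
//		func(resp api.ProgressResponse) { fmt.Println(resp.Status) }, 291); err != nil {
//		// handle error
//	}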