
update default log target

Michael Yang 1 year ago
parent
commit
c9167494cb

+ 1 - 1
llm/llama.cpp/generate_darwin_amd64.go

@@ -12,7 +12,7 @@ package llm
 //go:generate mv ggml/build/cpu/bin/server ggml/build/cpu/bin/ollama-runner
 
 //go:generate git submodule update --force gguf
-//go:generate git -C gguf apply ../patches/0001-remove-warm-up-logging.patch
+//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
 //go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=x86_64 -DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
 //go:generate cmake --build gguf/build/cpu --target server --config Release
 //go:generate mv gguf/build/cpu/bin/server gguf/build/cpu/bin/ollama-runner

+ 1 - 1
llm/llama.cpp/generate_darwin_arm64.go

@@ -12,7 +12,7 @@ package llm
 //go:generate mv ggml/build/metal/bin/server ggml/build/metal/bin/ollama-runner
 
 //go:generate git submodule update --force gguf
-//go:generate git -C gguf apply ../patches/0001-remove-warm-up-logging.patch
+//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
 //go:generate cmake -S gguf -B gguf/build/metal -DLLAMA_METAL=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=arm64 -DCMAKE_OSX_ARCHITECTURES=arm64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
 //go:generate cmake --build gguf/build/metal --target server --config Release
 //go:generate mv gguf/build/metal/bin/server gguf/build/metal/bin/ollama-runner

+ 1 - 1
llm/llama.cpp/generate_linux.go

@@ -13,7 +13,7 @@ package llm
 
 //go:generate git submodule update --force gguf
 //go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
-//go:generate git -C gguf apply ../patches/0001-remove-warm-up-logging.patch
+//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
 //go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_K_QUANTS=on
 //go:generate cmake --build gguf/build/cpu --target server --config Release
 //go:generate mv gguf/build/cpu/bin/server gguf/build/cpu/bin/ollama-runner

+ 1 - 1
llm/llama.cpp/generate_windows.go

@@ -10,7 +10,7 @@ package llm
 //go:generate cmd /c move ggml\build\cpu\bin\Release\server.exe ggml\build\cpu\bin\Release\ollama-runner.exe
 
 //go:generate git submodule update --force gguf
-//go:generate git -C gguf apply ../patches/0001-remove-warm-up-logging.patch
+//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
 //go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_K_QUANTS=on
 //go:generate cmake --build gguf/build/cpu --target server --config Release
 //go:generate cmd /c move gguf\build\cpu\bin\Release\server.exe gguf\build\cpu\bin\Release\ollama-runner.exe

+ 0 - 25
llm/llama.cpp/patches/0001-remove-warm-up-logging.patch

@@ -1,25 +0,0 @@
-From 8dbb5449db259a9c24796e7927d89bee98b6c8f5 Mon Sep 17 00:00:00 2001
-From: Bruce MacDonald <brucewmacdonald@gmail.com>
-Date: Thu, 5 Oct 2023 11:21:12 -0400
-Subject: [PATCH] remove warm up logging
-
----
- common/common.cpp | 2 --
- 1 file changed, 2 deletions(-)
-
-diff --git a/common/common.cpp b/common/common.cpp
-index 7370017..c4433fe 100644
---- a/common/common.cpp
-+++ b/common/common.cpp
-@@ -839,8 +839,6 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
-     }
- 
-     {
--        LOG("warming up the model with an empty run\n");
--
-         std::vector<llama_token> tmp = { llama_token_bos(lctx), llama_token_eos(lctx), };
-         llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, 0));
-         llama_kv_cache_tokens_rm(lctx, -1, -1);
--- 
-2.39.2 (Apple Git-143)
-

+ 25 - 0
llm/llama.cpp/patches/0001-update-default-log-target.patch

@@ -0,0 +1,25 @@
+From 6465fec6290f0a7f5d4d0fbe6bcf634e4810dde6 Mon Sep 17 00:00:00 2001
+From: Michael Yang <mxyng@pm.me>
+Date: Mon, 23 Oct 2023 10:39:34 -0700
+Subject: [PATCH] default log stderr
+
+---
+ common/log.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/common/log.h b/common/log.h
+index b8953fd..25522cd 100644
+--- a/common/log.h
++++ b/common/log.h
+@@ -90,7 +90,7 @@
+ //  }
+ //
+ #ifndef LOG_TARGET
+-    #define LOG_TARGET log_handler()
++    #define LOG_TARGET nullptr
+ #endif
+ 
+ #ifndef LOG_TEE_TARGET
+-- 
+2.42.0
+
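
For reference, the replacement patch works through the standard #ifndef override in common/log.h: LOG_TARGET is only given a default when the build has not already defined one, so changing that fallback from log_handler() to nullptr changes the default log sink for every build that does not set its own. A minimal, self-contained sketch of the pattern follows; the LOG macro body here is a hypothetical stand-in for illustration, not llama.cpp's actual implementation:

    // Sketch of the compile-time override pattern used by common/log.h.
    // Build with: g++ -std=c++11 sketch.cpp                      (patched default: silent)
    //         or: g++ -std=c++11 -DLOG_TARGET=stderr sketch.cpp  (log to stderr)
    #include <cstdio>

    #ifndef LOG_TARGET
        #define LOG_TARGET nullptr // patched default: no log output
    #endif

    // Write to the configured target, skipping the write entirely
    // when the target is nullptr.
    #define LOG(...)                                   \
        do {                                           \
            std::FILE *log_target = (LOG_TARGET);      \
            if (log_target != nullptr) {               \
                std::fprintf(log_target, __VA_ARGS__); \
                std::fflush(log_target);               \
            }                                          \
        } while (0)

    int main() {
        LOG("warming up the model with an empty run\n");
        return 0;
    }

Built as-is, the LOG call is a no-op, mirroring the patched default; defining LOG_TARGET at build time restores output. This presumably also explains why 0001-remove-warm-up-logging.patch is deleted in the same commit: with logging disabled by default, the warm-up message is silenced along with everything else.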