@@ -1,8 +1,8 @@
 diff --git a/examples/server/server.cpp b/examples/server/server.cpp
-index 7800c6e7..be30db23 100644
+index 2b2f4a0f..25857bdd 100644
 --- a/examples/server/server.cpp
 +++ b/examples/server/server.cpp
-@@ -30,6 +30,10 @@
+@@ -31,6 +31,10 @@
  #include <atomic>
  #include <signal.h>
 
@@ -12,8 +12,8 @@ index 7800c6e7..be30db23 100644
 +
  using json = nlohmann::json;
 
- struct server_params
-@@ -353,6 +357,9 @@ struct llama_server_context
+ struct server_params {
+@@ -363,6 +367,9 @@ struct llama_server_context
  llama_free_model(model);
  model = nullptr;
  }
@@ -23,7 +23,7 @@ index 7800c6e7..be30db23 100644
  }
 
  bool load_model(const gpt_params &params_)
-@@ -3143,6 +3150,7 @@ int main(int argc, char **argv)
+@@ -3494,6 +3501,7 @@ int main(int argc, char **argv)
  sigemptyset (&sigint_action.sa_mask);
  sigint_action.sa_flags = 0;
  sigaction(SIGINT, &sigint_action, NULL);
@@ -32,10 +32,10 @@ index 7800c6e7..be30db23 100644
  auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
  return (ctrl_type == CTRL_C_EVENT) ? (signal_handler(SIGINT), true) : false;
 diff --git a/ggml-cuda.cu b/ggml-cuda.cu
-index 933ebbc4..88a4f664 100644
+index 0c6501e9..75c12723 100644
 --- a/ggml-cuda.cu
 +++ b/ggml-cuda.cu
-@@ -39,6 +39,7 @@
+@@ -43,6 +43,7 @@
  #define __shfl_xor_sync(mask, var, laneMask, width) __shfl_xor(var, laneMask, width)
  #define cublasComputeType_t hipblasDatatype_t //deprecated, new hipblasComputeType_t not in 5.6
  #define cublasCreate hipblasCreate
@@ -43,7 +43,7 @@ index 933ebbc4..88a4f664 100644
  #define cublasGemmEx hipblasGemmEx
  #define cublasGemmBatchedEx hipblasGemmBatchedEx
  #define cublasGemmStridedBatchedEx hipblasGemmStridedBatchedEx
-@@ -7991,10 +7992,10 @@ GGML_CALL bool ggml_cublas_loaded(void) {
+@@ -8694,10 +8695,10 @@ GGML_CALL bool ggml_cublas_loaded(void) {
  return g_cublas_loaded;
  }
 
@@ -57,7 +57,7 @@ index 933ebbc4..88a4f664 100644
 
  #ifdef __HIP_PLATFORM_AMD__
  // Workaround for a rocBLAS bug when using multiple graphics cards:
-@@ -8004,7 +8005,7 @@ GGML_CALL void ggml_init_cublas() {
+@@ -8707,7 +8708,7 @@ GGML_CALL void ggml_init_cublas() {
  #endif
 
  if (cudaGetDeviceCount(&g_device_count) != cudaSuccess) {
@@ -66,7 +66,7 @@ index 933ebbc4..88a4f664 100644
  g_cublas_loaded = false;
  fprintf(stderr, "%s: no " GGML_CUDA_NAME " devices found, " GGML_CUDA_NAME " will be disabled\n", __func__);
  return;
-@@ -8075,7 +8076,7 @@ GGML_CALL void ggml_init_cublas() {
+@@ -8778,7 +8779,7 @@ GGML_CALL void ggml_init_cublas() {
  // configure logging to stdout
  // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, nullptr));
 
@@ -75,12 +75,11 @@ index 933ebbc4..88a4f664 100644
  g_cublas_loaded = true;
  }
  }
-@@ -11604,3 +11605,23 @@ GGML_CALL int ggml_backend_cuda_reg_devices() {
+@@ -12345,3 +12346,22 @@ GGML_CALL int ggml_backend_cuda_reg_devices() {
  }
  return device_count;
  }
 +
-+
 +extern "C" GGML_CALL void ggml_free_cublas(void);
 +GGML_CALL void ggml_free_cublas(void) {
 + for (int id = 0; id < g_device_count; ++id) {
@@ -100,16 +99,16 @@ index 933ebbc4..88a4f664 100644
 + g_cublas_initialized = false;
 +}
 diff --git a/ggml-cuda.h b/ggml-cuda.h
-index b1ebd61d..b4c80c2c 100644
+index b1ebd61d..6dd58ddf 100644
 --- a/ggml-cuda.h
 +++ b/ggml-cuda.h
-@@ -20,6 +20,9 @@ extern "C" {
- // Always success. To check if CUDA is actually loaded, use `ggml_cublas_loaded`.
- GGML_API GGML_CALL void ggml_init_cublas(void);
+@@ -23,6 +23,9 @@ GGML_API GGML_CALL void ggml_init_cublas(void);
+ // Returns `true` if there are available CUDA devices and cublas loads successfully; otherwise, it returns `false`.
+ GGML_API GGML_CALL bool ggml_cublas_loaded(void);
 
 +// Release CUDA resources
 +GGML_API GGML_CALL void ggml_free_cublas(void);
 +
- // Returns `true` if there are available CUDA devices and cublas loads successfully; otherwise, it returns `false`.
- GGML_API GGML_CALL bool ggml_cublas_loaded(void);
+ GGML_API GGML_CALL void * ggml_cuda_host_malloc(size_t size);
+ GGML_API GGML_CALL void ggml_cuda_host_free(void * ptr);
 