02-cudaleaks.diff 3.9 KB

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index b14cca61..02bfd4b1 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -29,6 +29,10 @@
 #include <signal.h>
 #include <memory>
 
+#ifdef GGML_USE_CUBLAS
+extern "C" GGML_CALL void ggml_free_cublas(void);
+#endif
+
 using json = nlohmann::json;
 
 bool server_verbose = false;
@@ -664,6 +668,10 @@ struct server_context {
             llama_free_model(model);
             model = nullptr;
         }
+
+#ifdef GGML_USE_CUBLAS
+        ggml_free_cublas();
+#endif
     }
 
     bool load_model(const gpt_params & params_) {
@@ -3499,6 +3507,7 @@ int main(int argc, char ** argv) {
     sigemptyset (&sigint_action.sa_mask);
     sigint_action.sa_flags = 0;
     sigaction(SIGINT, &sigint_action, NULL);
+    sigaction(SIGUSR1, &sigint_action, NULL);
 #elif defined (_WIN32)
     auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
         return (ctrl_type == CTRL_C_EVENT) ? (signal_handler(SIGINT), true) : false;
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index c207ff87..945708a4 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -46,6 +46,7 @@
 #define __shfl_xor_sync(mask, var, laneMask, width) __shfl_xor(var, laneMask, width)
 #define cublasComputeType_t hipblasDatatype_t //deprecated, new hipblasComputeType_t not in 5.6
 #define cublasCreate hipblasCreate
+#define cublasDestroy hipblasDestroy
 #define cublasGemmEx hipblasGemmEx
 #define cublasGemmBatchedEx hipblasGemmBatchedEx
 #define cublasGemmStridedBatchedEx hipblasGemmStridedBatchedEx
@@ -8014,10 +8015,10 @@ GGML_CALL bool ggml_cublas_loaded(void) {
     return g_cublas_loaded;
 }
 
-GGML_CALL void ggml_init_cublas() {
-    static bool initialized = false;
+static bool g_cublas_initialized = false;
 
-    if (!initialized) {
+GGML_CALL void ggml_init_cublas() {
+    if (!g_cublas_initialized) {
 
 #ifdef __HIP_PLATFORM_AMD__
         // Workaround for a rocBLAS bug when using multiple graphics cards:
@@ -8027,7 +8028,7 @@ GGML_CALL void ggml_init_cublas() {
 #endif
 
         if (cudaGetDeviceCount(&g_device_count) != cudaSuccess) {
-            initialized = true;
+            g_cublas_initialized = true;
             g_cublas_loaded = false;
             fprintf(stderr, "%s: no " GGML_CUDA_NAME " devices found, " GGML_CUDA_NAME " will be disabled\n", __func__);
             return;
@@ -8098,7 +8099,7 @@ GGML_CALL void ggml_init_cublas() {
         // configure logging to stdout
         // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, nullptr));
 
-        initialized = true;
+        g_cublas_initialized = true;
         g_cublas_loaded = true;
     }
 }
@@ -11753,3 +11754,23 @@ GGML_CALL int ggml_backend_cuda_reg_devices() {
     }
     return device_count;
 }
+
+
+extern "C" GGML_CALL void ggml_free_cublas(void);
+GGML_CALL void ggml_free_cublas(void) {
+    for (int id = 0; id < g_device_count; ++id) {
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+        if (g_device_caps[id].vmm) {
+            CU_CHECK(cuMemUnmap(g_cuda_pool_addr[id], g_cuda_pool_size[id]));
+            g_cuda_pool_size[id] = 0;
+            g_cuda_pool_addr[id] = 0;
+        }
+#endif
+        // TODO: free legacy non-vmm memory
+        // destroy cublas handle
+        CUBLAS_CHECK(cublasDestroy(g_cublas_handles[id]));
+        g_cublas_handles[id] = nullptr;
+    }
+
+    g_cublas_initialized = false;
+}
diff --git a/ggml-cuda.h b/ggml-cuda.h
index b1ebd61d..6dd58ddf 100644
--- a/ggml-cuda.h
+++ b/ggml-cuda.h
@@ -23,6 +23,9 @@ GGML_API GGML_CALL void ggml_init_cublas(void);
 // Returns `true` if there are available CUDA devices and cublas loads successfully; otherwise, it returns `false`.
 GGML_API GGML_CALL bool ggml_cublas_loaded(void);
 
+// Release CUDA resources
+GGML_API GGML_CALL void ggml_free_cublas(void);
+
 GGML_API GGML_CALL void * ggml_cuda_host_malloc(size_t size);
 GGML_API GGML_CALL void   ggml_cuda_host_free(void * ptr);
 
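

Usage note (not part of the patch): a minimal sketch of the init/free lifecycle the patch enables, mirroring the server_context destructor change above. Only ggml_init_cublas, ggml_cublas_loaded, and ggml_free_cublas come from ggml-cuda.h as modified here; the main() harness is hypothetical and the GGML_API/GGML_CALL macros are omitted for brevity. The patch also installs the existing SIGINT handler for SIGUSR1, presumably so an external `kill -USR1 <pid>` can trigger the same clean-shutdown path.

// lifecycle_sketch.cpp -- hypothetical host program, not part of the patch.
// Build with -DGGML_USE_CUBLAS and link against ggml-cuda.
#include <cstdio>

extern "C" {
    void ggml_init_cublas(void);   // declared in ggml-cuda.h
    bool ggml_cublas_loaded(void); // declared in ggml-cuda.h
    void ggml_free_cublas(void);   // added by this patch
}

int main(void) {
    ggml_init_cublas();            // sets up per-device cuBLAS handles
    if (!ggml_cublas_loaded()) {
        fprintf(stderr, "no CUDA devices found, continuing CPU-only\n");
        return 0;
    }

    // ... load a model, run inference, unload the model ...

    // Mirror of the server_context destructor above: unmap the VMM pools
    // and destroy the cuBLAS handles so that repeated load/unload cycles
    // in a long-lived process do not leak device memory.
    ggml_free_cublas();
    return 0;
}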
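Verification note (also illustrative, not part of the patch): assuming a CUDA toolkit is available, one way to check that the teardown actually returns device memory is to compare cudaMemGetInfo before and after an init/free cycle; a deficit that keeps growing across cycles points at a leak. cudaMemGetInfo reports the current device only, so multi-GPU setups would loop over devices.

// leak_check.cpp -- illustrative measurement harness, not part of the patch.
// Build with -DGGML_USE_CUBLAS and link against ggml-cuda and the CUDA runtime.
#include <cstdio>
#include <cuda_runtime.h>

extern "C" {
    void ggml_init_cublas(void);
    void ggml_free_cublas(void);
}

static size_t device_free_bytes(void) {
    size_t free_b = 0, total_b = 0;
    cudaMemGetInfo(&free_b, &total_b);   // free/total memory on the current device
    return free_b;
}

int main(void) {
    cudaFree(nullptr);                   // force CUDA context creation up front so
                                         // it does not skew the first measurement
    for (int i = 0; i < 3; ++i) {
        const size_t before = device_free_bytes();
        ggml_init_cublas();              // creates handles (and, lazily, pools)
        ggml_free_cublas();              // should release them again
        const size_t after = device_free_bytes();
        printf("cycle %d: %lld bytes not returned\n",
               i, (long long)before - (long long)after);
    }
    return 0;
}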