gpu.go

//go:build linux || windows

package gpu

/*
#cgo linux LDFLAGS: -lrt -lpthread -ldl -lstdc++ -lm
#cgo windows LDFLAGS: -lpthread
#include "gpu_info.h"
*/
import "C"
import (
	"fmt"
	"log/slog"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"unsafe"
)
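
// handles holds the loaded GPU management library handles; at most one of
// the fields is non-nil once detection has run.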
type handles struct {
	cuda *C.cuda_handle_t
	rocm *C.rocm_handle_t
}

var gpuMutex sync.Mutex
var gpuHandles *handles = nil

// With our current CUDA compile flags, compute capability 5.2 and older will not work properly
const CudaComputeMajorMin = 6

// Possible locations for the nvidia-ml library
var CudaLinuxGlobs = []string{
	"/usr/local/cuda/lib64/libnvidia-ml.so*",
	"/usr/lib/x86_64-linux-gnu/nvidia/current/libnvidia-ml.so*",
	"/usr/lib/x86_64-linux-gnu/libnvidia-ml.so*",
	"/usr/lib/wsl/lib/libnvidia-ml.so*",
	"/opt/cuda/lib64/libnvidia-ml.so*",
	"/opt/cuda/targets/x86_64-linux/lib/stubs/libnvidia-ml.so*",
	"/usr/lib*/libnvidia-ml.so*",
	"/usr/local/lib*/libnvidia-ml.so*",
	"/usr/lib/aarch64-linux-gnu/nvidia/current/libnvidia-ml.so*",
	"/usr/lib/aarch64-linux-gnu/libnvidia-ml.so*",
}

var CudaWindowsGlobs = []string{
	"c:\\Windows\\System32\\nvml.dll",
}

var RocmLinuxGlobs = []string{
	"/opt/rocm*/lib*/librocm_smi64.so*",
}

var RocmWindowsGlobs = []string{
	"c:\\Windows\\System32\\rocm_smi64.dll",
}
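
// initGPUHandles probes the known library locations for a CUDA (NVML) or
// ROCm (rocm_smi) management library and loads the first one that
// initializes successfully, preferring CUDA.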
// Note: gpuMutex must already be held
func initGPUHandles() {
	// TODO - if the ollama build is CPU only, don't do these checks as they're irrelevant and confusing

	var cudaMgmtName string
	var cudaMgmtPatterns []string
	var rocmMgmtName string
	var rocmMgmtPatterns []string

	switch runtime.GOOS {
	case "windows":
		cudaMgmtName = "nvml.dll"
		cudaMgmtPatterns = make([]string, len(CudaWindowsGlobs))
		copy(cudaMgmtPatterns, CudaWindowsGlobs)
		rocmMgmtName = "rocm_smi64.dll"
		rocmMgmtPatterns = make([]string, len(RocmWindowsGlobs))
		copy(rocmMgmtPatterns, RocmWindowsGlobs)
	case "linux":
		cudaMgmtName = "libnvidia-ml.so"
		cudaMgmtPatterns = make([]string, len(CudaLinuxGlobs))
		copy(cudaMgmtPatterns, CudaLinuxGlobs)
		rocmMgmtName = "librocm_smi64.so"
		rocmMgmtPatterns = make([]string, len(RocmLinuxGlobs))
		copy(rocmMgmtPatterns, RocmLinuxGlobs)
	default:
		return
	}

	slog.Info("Detecting GPU type")
	gpuHandles = &handles{nil, nil}
	cudaLibPaths := FindGPULibs(cudaMgmtName, cudaMgmtPatterns)
	if len(cudaLibPaths) > 0 {
		cuda := LoadCUDAMgmt(cudaLibPaths)
		if cuda != nil {
			slog.Info("Nvidia GPU detected")
			gpuHandles.cuda = cuda
			return
		}
	}

	rocmLibPaths := FindGPULibs(rocmMgmtName, rocmMgmtPatterns)
	if len(rocmLibPaths) > 0 {
		rocm := LoadROCMMgmt(rocmLibPaths)
		if rocm != nil {
			slog.Info("Radeon GPU detected")
			gpuHandles.rocm = rocm
			return
		}
	}
}
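
// GetGPUInfo reports which acceleration library to use ("cuda", "rocm", or
// "cpu") along with the device count and free/total memory for that backend.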
func GetGPUInfo() GpuInfo {
	// TODO - consider exploring lspci (and equivalent on windows) to check for
	// GPUs so we can report warnings if we see Nvidia/AMD but fail to load the libraries
	gpuMutex.Lock()
	defer gpuMutex.Unlock()
	if gpuHandles == nil {
		initGPUHandles()
	}

	var memInfo C.mem_info_t
	resp := GpuInfo{}
	if gpuHandles.cuda != nil {
		C.cuda_check_vram(*gpuHandles.cuda, &memInfo)
		if memInfo.err != nil {
			slog.Info(fmt.Sprintf("error looking up CUDA GPU memory: %s", C.GoString(memInfo.err)))
			C.free(unsafe.Pointer(memInfo.err))
		} else {
			// Verify minimum compute capability
			var cc C.cuda_compute_capability_t
			C.cuda_compute_capability(*gpuHandles.cuda, &cc)
			if cc.err != nil {
				slog.Info(fmt.Sprintf("error looking up CUDA GPU compute capability: %s", C.GoString(cc.err)))
				C.free(unsafe.Pointer(cc.err))
			} else if cc.major >= CudaComputeMajorMin {
				slog.Info(fmt.Sprintf("CUDA Compute Capability detected: %d.%d", cc.major, cc.minor))
				resp.Library = "cuda"
			} else {
				slog.Info(fmt.Sprintf("CUDA GPU is too old. Falling back to CPU mode. Compute Capability detected: %d.%d", cc.major, cc.minor))
			}
		}
	} else if gpuHandles.rocm != nil {
		C.rocm_check_vram(*gpuHandles.rocm, &memInfo)
		if memInfo.err != nil {
			slog.Info(fmt.Sprintf("error looking up ROCm GPU memory: %s", C.GoString(memInfo.err)))
			C.free(unsafe.Pointer(memInfo.err))
		} else {
			resp.Library = "rocm"
			var version C.rocm_version_resp_t
			C.rocm_get_version(*gpuHandles.rocm, &version)
			verString := C.GoString(version.str)
			if version.status == 0 {
				resp.Variant = "v" + verString
			} else {
				slog.Info(fmt.Sprintf("failed to look up ROCm version: %s", verString))
			}
			C.free(unsafe.Pointer(version.str))
		}
	}
	if resp.Library == "" {
		C.cpu_check_ram(&memInfo)
		resp.Library = "cpu"
		resp.Variant = GetCPUVariant()
	}
	if memInfo.err != nil {
		slog.Info(fmt.Sprintf("error looking up CPU memory: %s", C.GoString(memInfo.err)))
		C.free(unsafe.Pointer(memInfo.err))
		return resp
	}

	resp.DeviceCount = uint32(memInfo.count)
	resp.FreeMemory = uint64(memInfo.free)
	resp.TotalMemory = uint64(memInfo.total)
	return resp
}
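
// getCPUMem returns free and total system RAM as reported by the native
// cpu_check_ram helper.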
func getCPUMem() (memInfo, error) {
	var ret memInfo
	var info C.mem_info_t
	C.cpu_check_ram(&info)
	if info.err != nil {
		defer C.free(unsafe.Pointer(info.err))
		return ret, fmt.Errorf("%s", C.GoString(info.err))
	}
	ret.FreeMemory = uint64(info.free)
	ret.TotalMemory = uint64(info.total)
	return ret, nil
}
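
// CheckVRAM returns the usable VRAM in bytes, holding back roughly 10% (or at
// least 512 MiB per detected GPU) as headroom for unaccounted overhead. It
// returns an error when no supported GPU is detected. A minimal usage sketch
// (assuming this package is imported as "gpu"):
//
//	if vram, err := gpu.CheckVRAM(); err == nil {
//		slog.Info(fmt.Sprintf("usable VRAM: %d bytes", vram))
//	} else {
//		// no GPU detected; fall back to CPU-based memory sizing
//	}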
func CheckVRAM() (int64, error) {
	gpuInfo := GetGPUInfo()
	if gpuInfo.FreeMemory > 0 && (gpuInfo.Library == "cuda" || gpuInfo.Library == "rocm") {
		// leave 10% or 512MiB of VRAM free per GPU to handle unaccounted-for overhead
		overhead := gpuInfo.FreeMemory / 10
		gpus := uint64(gpuInfo.DeviceCount)
		if overhead < gpus*512*1024*1024 {
			overhead = gpus * 512 * 1024 * 1024
		}
		return int64(gpuInfo.FreeMemory - overhead), nil
	}
	return 0, fmt.Errorf("no GPU detected") // TODO - better handling of CPU based memory determination
}
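
// FindGPULibs searches the loader path (PATH on Windows, LD_LIBRARY_PATH on
// Linux) plus the provided glob patterns for baseLibName, resolving symlinks
// and de-duplicating so each candidate library is tried only once.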
func FindGPULibs(baseLibName string, patterns []string) []string {
	// Multiple GPU libraries may exist, and some may not work, so keep trying until we exhaust them
	var ldPaths []string
	gpuLibPaths := []string{}
	slog.Info(fmt.Sprintf("Searching for GPU management library %s", baseLibName))

	switch runtime.GOOS {
	case "windows":
		ldPaths = strings.Split(os.Getenv("PATH"), ";")
	case "linux":
		ldPaths = strings.Split(os.Getenv("LD_LIBRARY_PATH"), ":")
	default:
		return gpuLibPaths
	}

	// Start with whatever we find in the PATH/LD_LIBRARY_PATH
	for _, ldPath := range ldPaths {
		d, err := filepath.Abs(ldPath)
		if err != nil {
			continue
		}
		patterns = append(patterns, filepath.Join(d, baseLibName+"*"))
	}
	slog.Debug(fmt.Sprintf("gpu management search paths: %v", patterns))
	for _, pattern := range patterns {
		// Ignore glob discovery errors
		matches, _ := filepath.Glob(pattern)
		for _, match := range matches {
			// Resolve any links so we don't try the same lib multiple times
			// and weed out any dups across globs
			libPath := match
			tmp := match
			var err error
			for ; err == nil; tmp, err = os.Readlink(libPath) {
				if !filepath.IsAbs(tmp) {
					tmp = filepath.Join(filepath.Dir(libPath), tmp)
				}
				libPath = tmp
			}
			new := true
			for _, cmp := range gpuLibPaths {
				if cmp == libPath {
					new = false
					break
				}
			}
			if new {
				gpuLibPaths = append(gpuLibPaths, libPath)
			}
		}
	}
	slog.Info(fmt.Sprintf("Discovered GPU libraries: %v", gpuLibPaths))
	return gpuLibPaths
}
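
// LoadCUDAMgmt attempts to initialize each candidate NVML library in turn and
// returns a handle to the first one that loads, or nil if none do.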
func LoadCUDAMgmt(cudaLibPaths []string) *C.cuda_handle_t {
	var resp C.cuda_init_resp_t
	for _, libPath := range cudaLibPaths {
		lib := C.CString(libPath)
		defer C.free(unsafe.Pointer(lib))
		C.cuda_init(lib, &resp)
		if resp.err != nil {
			slog.Info(fmt.Sprintf("Unable to load CUDA management library %s: %s", libPath, C.GoString(resp.err)))
			C.free(unsafe.Pointer(resp.err))
		} else {
			return &resp.ch
		}
	}
	return nil
}
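
// LoadROCMMgmt attempts to initialize each candidate rocm_smi library in turn
// and returns a handle to the first one that loads, or nil if none do.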
func LoadROCMMgmt(rocmLibPaths []string) *C.rocm_handle_t {
	var resp C.rocm_init_resp_t
	for _, libPath := range rocmLibPaths {
		lib := C.CString(libPath)
		defer C.free(unsafe.Pointer(lib))
		C.rocm_init(lib, &resp)
		if resp.err != nil {
			slog.Info(fmt.Sprintf("Unable to load ROCm management library %s: %s", libPath, C.GoString(resp.err)))
			C.free(unsafe.Pointer(resp.err))
		} else {
			return &resp.rh
		}
	}
	return nil
}