gpu.go

//go:build linux || windows

package gpu

/*
#cgo linux LDFLAGS: -lrt -lpthread -ldl -lstdc++ -lm
#cgo windows LDFLAGS: -lpthread
#include "gpu_info.h"
*/
import "C"

import (
	"fmt"
	"log/slog"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"unsafe"

	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
)
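
// handles bundles the C management library handles loaded during discovery
// along with the device count they report.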
type handles struct {
	deviceCount int
	cudart      *C.cudart_handle_t
	nvcuda      *C.nvcuda_handle_t
	oneapi      *C.oneapi_handle_t
}
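
// Baseline VRAM reserved per GPU library; surfaced to callers as
// GpuInfo.MinimumMemory during discovery.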
const (
	cudaMinimumMemory = 457 * format.MebiByte
	rocmMinimumMemory = 457 * format.MebiByte
)

var (
	gpuMutex      sync.Mutex
	bootstrapped  bool
	cpuCapability CPUCapability
	cpus          []CPUInfo
	cudaGPUs      []CudaGPUInfo
	nvcudaLibPath string
	cudartLibPath string
	oneapiLibPath string
	rocmGPUs      []RocmGPUInfo
	oneapiGPUs    []OneapiGPUInfo
)

// With our current CUDA compile flags, compute capability older than 5.0 will not work properly
var CudaComputeMin = [2]C.int{5, 0}

var RocmComputeMin = 9

// TODO find a better way to detect iGPU instead of minimum memory
const IGPUMemLimit = 1 * format.GibiByte // 512M is what they typically report, so anything less than 1G must be iGPU
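
// Default per-OS search globs for each management library. FindGPULibs appends
// these after any candidates found on PATH / LD_LIBRARY_PATH.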
var CudartLinuxGlobs = []string{
	"/usr/local/cuda/lib64/libcudart.so*",
	"/usr/lib/x86_64-linux-gnu/nvidia/current/libcudart.so*",
	"/usr/lib/x86_64-linux-gnu/libcudart.so*",
	"/usr/lib/wsl/lib/libcudart.so*",
	"/usr/lib/wsl/drivers/*/libcudart.so*",
	"/opt/cuda/lib64/libcudart.so*",
	"/usr/local/cuda*/targets/aarch64-linux/lib/libcudart.so*",
	"/usr/lib/aarch64-linux-gnu/nvidia/current/libcudart.so*",
	"/usr/lib/aarch64-linux-gnu/libcudart.so*",
	"/usr/local/cuda/lib*/libcudart.so*",
	"/usr/lib*/libcudart.so*",
	"/usr/local/lib*/libcudart.so*",
}

var CudartWindowsGlobs = []string{
	"c:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v*\\bin\\cudart64_*.dll",
}

var NvcudaLinuxGlobs = []string{
	"/usr/local/cuda*/targets/*/lib/libcuda.so*",
	"/usr/lib/*-linux-gnu/nvidia/current/libcuda.so*",
	"/usr/lib/*-linux-gnu/libcuda.so*",
	"/usr/lib/wsl/lib/libcuda.so*",
	"/usr/lib/wsl/drivers/*/libcuda.so*",
	"/opt/cuda/lib*/libcuda.so*",
	"/usr/local/cuda/lib*/libcuda.so*",
	"/usr/lib*/libcuda.so*",
	"/usr/local/lib*/libcuda.so*",
}

var NvcudaWindowsGlobs = []string{
	"c:\\windows\\system*\\nvcuda.dll",
}

var OneapiWindowsGlobs = []string{
	"c:\\Windows\\System32\\DriverStore\\FileRepository\\*\\ze_intel_gpu64.dll",
}

var OneapiLinuxGlobs = []string{
	"/usr/lib/x86_64-linux-gnu/libze_intel_gpu.so*",
	"/usr/lib*/libze_intel_gpu.so*",
}

// Jetson devices have JETSON_JETPACK="x.y.z" factory set to the Jetpack version installed.
// Included to drive logic for reducing Ollama-allocated overhead on L4T/Jetson devices.
var CudaTegra string = os.Getenv("JETSON_JETPACK")
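
// initCudaHandles probes the candidate library paths for nvcuda, cudart, and
// oneAPI management libraries, loading the first one that initializes and
// caching its path so later refreshes can skip the search.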
// Note: gpuMutex must already be held
func initCudaHandles() *handles {
	// TODO - if the ollama build is CPU only, don't do these checks as they're irrelevant and confusing

	gpuHandles := &handles{}

	// Short circuit if we already know which library to use
	if nvcudaLibPath != "" {
		gpuHandles.deviceCount, gpuHandles.nvcuda, _ = LoadNVCUDAMgmt([]string{nvcudaLibPath})
		return gpuHandles
	}
	if cudartLibPath != "" {
		gpuHandles.deviceCount, gpuHandles.cudart, _ = LoadCUDARTMgmt([]string{cudartLibPath})
		return gpuHandles
	}

	slog.Debug("searching for GPU discovery libraries for NVIDIA")
	var cudartMgmtName string
	var cudartMgmtPatterns []string
	var nvcudaMgmtName string
	var nvcudaMgmtPatterns []string
	var oneapiMgmtName string
	var oneapiMgmtPatterns []string

	tmpDir, _ := PayloadsDir()
	switch runtime.GOOS {
	case "windows":
		cudartMgmtName = "cudart64_*.dll"
		localAppData := os.Getenv("LOCALAPPDATA")
		cudartMgmtPatterns = []string{filepath.Join(localAppData, "Programs", "Ollama", cudartMgmtName)}
		cudartMgmtPatterns = append(cudartMgmtPatterns, CudartWindowsGlobs...)
		// Aligned with driver, we can't carry as payloads
		nvcudaMgmtName = "nvcuda.dll"
		nvcudaMgmtPatterns = NvcudaWindowsGlobs
		oneapiMgmtName = "ze_intel_gpu64.dll"
		oneapiMgmtPatterns = OneapiWindowsGlobs
	case "linux":
		cudartMgmtName = "libcudart.so*"
		if tmpDir != "" {
			// TODO - add "payloads" for subprocess
			cudartMgmtPatterns = []string{filepath.Join(tmpDir, "cuda*", cudartMgmtName)}
		}
		cudartMgmtPatterns = append(cudartMgmtPatterns, CudartLinuxGlobs...)
		// Aligned with driver, we can't carry as payloads
		nvcudaMgmtName = "libcuda.so*"
		nvcudaMgmtPatterns = NvcudaLinuxGlobs
		oneapiMgmtName = "libze_intel_gpu.so"
		oneapiMgmtPatterns = OneapiLinuxGlobs
	default:
		return gpuHandles
	}

	nvcudaLibPaths := FindGPULibs(nvcudaMgmtName, nvcudaMgmtPatterns)
	if len(nvcudaLibPaths) > 0 {
		deviceCount, nvcuda, libPath := LoadNVCUDAMgmt(nvcudaLibPaths)
		if nvcuda != nil {
			slog.Debug("detected GPUs", "count", deviceCount, "library", libPath)
			gpuHandles.nvcuda = nvcuda
			gpuHandles.deviceCount = deviceCount
			nvcudaLibPath = libPath
			return gpuHandles
		}
	}

	cudartLibPaths := FindGPULibs(cudartMgmtName, cudartMgmtPatterns)
	if len(cudartLibPaths) > 0 {
		deviceCount, cudart, libPath := LoadCUDARTMgmt(cudartLibPaths)
		if cudart != nil {
			slog.Debug("detected GPUs", "library", libPath, "count", deviceCount)
			gpuHandles.cudart = cudart
			gpuHandles.deviceCount = deviceCount
			cudartLibPath = libPath
			return gpuHandles
		}
	}

	oneapiLibPaths := FindGPULibs(oneapiMgmtName, oneapiMgmtPatterns)
	if len(oneapiLibPaths) > 0 {
		deviceCount, oneapi, libPath := LoadOneapiMgmt(oneapiLibPaths)
		if oneapi != nil {
			slog.Debug("detected Intel GPUs", "library", libPath, "count", deviceCount)
			gpuHandles.oneapi = oneapi
			gpuHandles.deviceCount = deviceCount
			oneapiLibPath = libPath
			return gpuHandles
		}
	}

	return gpuHandles
}
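
// GetGPUInfo reports the GPUs (or the CPU as a fallback) that inference can
// run on. The first call bootstraps CPU and GPU discovery; subsequent calls
// only refresh the free-memory figures for the GPUs found earlier.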
func GetGPUInfo() GpuInfoList {
	// TODO - consider exploring lspci (and equivalent on windows) to check for
	// GPUs so we can report warnings if we see Nvidia/AMD but fail to load the libraries
	gpuMutex.Lock()
	defer gpuMutex.Unlock()
	needRefresh := true
	var gpuHandles *handles
	defer func() {
		if gpuHandles == nil {
			return
		}
		if gpuHandles.cudart != nil {
			C.cudart_release(*gpuHandles.cudart)
		}
		if gpuHandles.nvcuda != nil {
			C.nvcuda_release(*gpuHandles.nvcuda)
		}
	}()

	if !bootstrapped {
		slog.Debug("Detecting GPUs")
		needRefresh = false
		cpuCapability = getCPUCapability()
		var memInfo C.mem_info_t
		C.cpu_check_ram(&memInfo)
		if memInfo.err != nil {
			slog.Info("error looking up CPU memory", "error", C.GoString(memInfo.err))
			C.free(unsafe.Pointer(memInfo.err))
			return []GpuInfo{}
		}
		cpuInfo := CPUInfo{
			GpuInfo: GpuInfo{
				Library: "cpu",
				Variant: cpuCapability.ToVariant(),
			},
		}
		cpuInfo.TotalMemory = uint64(memInfo.total)
		cpuInfo.FreeMemory = uint64(memInfo.free)
		cpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
		cpus = []CPUInfo{cpuInfo}

		// Fall back to CPU mode if we're lacking required vector extensions on x86
		if cpuCapability < GPURunnerCPUCapability && runtime.GOARCH == "amd64" {
			slog.Warn("CPU does not have minimum vector extensions, GPU inference disabled", "required", GPURunnerCPUCapability.ToString(), "detected", cpuCapability.ToString())
			bootstrapped = true
			// No need to do any GPU discovery, since we can't run on them
			return GpuInfoList{cpus[0].GpuInfo}
		}

		// On windows we bundle the nvidia library one level above the runner dir
		depPath := ""
		if runtime.GOOS == "windows" && envconfig.RunnersDir != "" {
			depPath = filepath.Dir(envconfig.RunnersDir)
		}

		// Load ALL libraries
		gpuHandles = initCudaHandles()
		// TODO needs a refactoring pass to init oneapi handles

		// NVIDIA
		for i := range gpuHandles.deviceCount {
			if gpuHandles.cudart != nil || gpuHandles.nvcuda != nil {
				gpuInfo := CudaGPUInfo{
					GpuInfo: GpuInfo{
						Library: "cuda",
					},
					index: i,
				}
				var driverMajor int
				var driverMinor int
				if gpuHandles.cudart != nil {
					C.cudart_bootstrap(*gpuHandles.cudart, C.int(i), &memInfo)
				} else {
					C.nvcuda_bootstrap(*gpuHandles.nvcuda, C.int(i), &memInfo)
					driverMajor = int(gpuHandles.nvcuda.driver_major)
					driverMinor = int(gpuHandles.nvcuda.driver_minor)
				}
				if memInfo.err != nil {
					slog.Info("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err))
					C.free(unsafe.Pointer(memInfo.err))
					continue
				}
				if memInfo.major < CudaComputeMin[0] || (memInfo.major == CudaComputeMin[0] && memInfo.minor < CudaComputeMin[1]) {
					slog.Info(fmt.Sprintf("[%d] CUDA GPU is too old. Compute Capability detected: %d.%d", i, memInfo.major, memInfo.minor))
					continue
				}
				gpuInfo.TotalMemory = uint64(memInfo.total)
				gpuInfo.FreeMemory = uint64(memInfo.free)
				gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
				gpuInfo.Compute = fmt.Sprintf("%d.%d", memInfo.major, memInfo.minor)
				gpuInfo.MinimumMemory = cudaMinimumMemory
				gpuInfo.DependencyPath = depPath
				gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
				gpuInfo.DriverMajor = driverMajor
				gpuInfo.DriverMinor = driverMinor

				// TODO potentially sort on our own algorithm instead of what the underlying GPU library does...
				cudaGPUs = append(cudaGPUs, gpuInfo)
			}
			if gpuHandles.oneapi != nil {
				gpuInfo := OneapiGPUInfo{
					GpuInfo: GpuInfo{
						Library: "oneapi",
					},
					index: i,
				}
				// TODO - split bootstrapping from updating free memory
				C.oneapi_check_vram(*gpuHandles.oneapi, &memInfo)
				// work-around: leave some reserve vram for the mkl lib used in the ggml-sycl backend
				totalFreeMem := float64(memInfo.free) * 0.95
				memInfo.free = C.uint64_t(totalFreeMem)
				gpuInfo.TotalMemory = uint64(memInfo.total)
				gpuInfo.FreeMemory = uint64(memInfo.free)
				gpuInfo.ID = strconv.Itoa(i)
				oneapiGPUs = append(oneapiGPUs, gpuInfo)
			}
		}

		rocmGPUs = AMDGetGPUInfo()
		bootstrapped = true
	}

	// For detected GPUs, load library if not loaded
	// Refresh free memory usage
	if needRefresh {
		// TODO - CPU system memory tracking/refresh
		var memInfo C.mem_info_t
		if gpuHandles == nil && len(cudaGPUs) > 0 {
			gpuHandles = initCudaHandles()
		}
		for i, gpu := range cudaGPUs {
			if gpuHandles.cudart != nil {
				C.cudart_bootstrap(*gpuHandles.cudart, C.int(gpu.index), &memInfo)
			} else {
				C.nvcuda_get_free(*gpuHandles.nvcuda, C.int(gpu.index), &memInfo.free)
			}
			if memInfo.err != nil {
				slog.Warn("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err))
				C.free(unsafe.Pointer(memInfo.err))
				continue
			}
			if memInfo.free == 0 {
				slog.Warn("error looking up nvidia GPU memory")
				continue
			}
			slog.Debug("updating cuda free memory", "gpu", gpu.ID, "name", gpu.Name, "before", format.HumanBytes2(gpu.FreeMemory), "now", format.HumanBytes2(uint64(memInfo.free)))
			cudaGPUs[i].FreeMemory = uint64(memInfo.free)
		}
		err := RocmGPUInfoList(rocmGPUs).RefreshFreeMemory()
		if err != nil {
			slog.Debug("problem refreshing ROCm free memory", "error", err)
		}
	}

	resp := []GpuInfo{}
	for _, gpu := range cudaGPUs {
		resp = append(resp, gpu.GpuInfo)
	}
	for _, gpu := range rocmGPUs {
		resp = append(resp, gpu.GpuInfo)
	}
	if len(resp) == 0 {
		resp = append(resp, cpus[0].GpuInfo)
	}
	return resp
}
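
// GetCPUMem returns total and free system memory as reported by the C helper.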
func GetCPUMem() (memInfo, error) {
	var ret memInfo
	var info C.mem_info_t
	C.cpu_check_ram(&info)
	if info.err != nil {
		defer C.free(unsafe.Pointer(info.err))
		return ret, fmt.Errorf("%s", C.GoString(info.err))
	}
	ret.FreeMemory = uint64(info.free)
	ret.TotalMemory = uint64(info.total)
	return ret, nil
}
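
// FindGPULibs returns the deduplicated, symlink-resolved paths matching
// baseLibName, searching PATH (windows) or LD_LIBRARY_PATH (linux) first and
// the supplied default glob patterns second.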
func FindGPULibs(baseLibName string, defaultPatterns []string) []string {
	// Multiple GPU libraries may exist, and some may not work, so keep trying until we exhaust them
	var ldPaths []string
	var patterns []string
	gpuLibPaths := []string{}
	slog.Debug("Searching for GPU library", "name", baseLibName)

	switch runtime.GOOS {
	case "windows":
		ldPaths = strings.Split(os.Getenv("PATH"), ";")
	case "linux":
		ldPaths = strings.Split(os.Getenv("LD_LIBRARY_PATH"), ":")
	default:
		return gpuLibPaths
	}

	// Start with whatever we find in the PATH/LD_LIBRARY_PATH
	for _, ldPath := range ldPaths {
		d, err := filepath.Abs(ldPath)
		if err != nil {
			continue
		}
		patterns = append(patterns, filepath.Join(d, baseLibName+"*"))
	}
	patterns = append(patterns, defaultPatterns...)
	slog.Debug("gpu library search", "globs", patterns)
	for _, pattern := range patterns {
		// Nvidia PhysX is known to return bogus results
		if strings.Contains(pattern, "PhysX") {
			slog.Debug("skipping PhysX cuda library path", "path", pattern)
			continue
		}
		// Ignore glob discovery errors
		matches, _ := filepath.Glob(pattern)
		for _, match := range matches {
			// Resolve any links so we don't try the same lib multiple times
			// and weed out any dups across globs
			libPath := match
			tmp := match
			var err error
			for ; err == nil; tmp, err = os.Readlink(libPath) {
				if !filepath.IsAbs(tmp) {
					tmp = filepath.Join(filepath.Dir(libPath), tmp)
				}
				libPath = tmp
			}
			new := true
			for _, cmp := range gpuLibPaths {
				if cmp == libPath {
					new = false
					break
				}
			}
			if new {
				gpuLibPaths = append(gpuLibPaths, libPath)
			}
		}
	}
	slog.Debug("discovered GPU libraries", "paths", gpuLibPaths)
	return gpuLibPaths
}
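
// LoadCUDARTMgmt tries each candidate cudart library in turn and returns the
// device count, handle, and path of the first one that initializes.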
func LoadCUDARTMgmt(cudartLibPaths []string) (int, *C.cudart_handle_t, string) {
	var resp C.cudart_init_resp_t
	resp.ch.verbose = getVerboseState()
	for _, libPath := range cudartLibPaths {
		lib := C.CString(libPath)
		defer C.free(unsafe.Pointer(lib))
		C.cudart_init(lib, &resp)
		if resp.err != nil {
			slog.Debug("Unable to load cudart", "library", libPath, "error", C.GoString(resp.err))
			C.free(unsafe.Pointer(resp.err))
		} else {
			return int(resp.num_devices), &resp.ch, libPath
		}
	}
	return 0, nil, ""
}
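
// LoadNVCUDAMgmt tries each candidate nvcuda (driver) library in turn and
// returns the device count, handle, and path of the first one that initializes.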
func LoadNVCUDAMgmt(nvcudaLibPaths []string) (int, *C.nvcuda_handle_t, string) {
	var resp C.nvcuda_init_resp_t
	resp.ch.verbose = getVerboseState()
	for _, libPath := range nvcudaLibPaths {
		lib := C.CString(libPath)
		defer C.free(unsafe.Pointer(lib))
		C.nvcuda_init(lib, &resp)
		if resp.err != nil {
			slog.Debug("Unable to load nvcuda", "library", libPath, "error", C.GoString(resp.err))
			C.free(unsafe.Pointer(resp.err))
		} else {
			return int(resp.num_devices), &resp.ch, libPath
		}
	}
	return 0, nil, ""
}
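
// LoadOneapiMgmt tries each candidate oneAPI management library in turn and
// returns the device count, handle, and path of the first one that initializes.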
func LoadOneapiMgmt(oneapiLibPaths []string) (int, *C.oneapi_handle_t, string) {
	var resp C.oneapi_init_resp_t
	resp.oh.verbose = getVerboseState()
	for _, libPath := range oneapiLibPaths {
		lib := C.CString(libPath)
		defer C.free(unsafe.Pointer(lib))
		C.oneapi_init(lib, &resp)
		if resp.err != nil {
			slog.Debug("Unable to load oneAPI management library", "library", libPath, "error", C.GoString(resp.err))
			C.free(unsafe.Pointer(resp.err))
		} else {
			return int(resp.num_devices), &resp.oh, libPath
		}
	}
	return 0, nil, ""
}
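
// getVerboseState maps the envconfig debug setting onto the verbose flag
// expected by the C discovery code (1 when debugging is enabled, 0 otherwise).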
func getVerboseState() C.uint16_t {
	if envconfig.Debug {
		return C.uint16_t(1)
	}
	return C.uint16_t(0)
}

// Given the list of GPUs this instantiation is targeted for,
// figure out the visible devices environment variable
//
// If different libraries are detected, the first one is what we use
func (l GpuInfoList) GetVisibleDevicesEnv() (string, string) {
	if len(l) == 0 {
		return "", ""
	}
	switch l[0].Library {
	case "cuda":
		return cudaGetVisibleDevicesEnv(l)
	case "rocm":
		return rocmGetVisibleDevicesEnv(l)
	case "oneapi":
		return oneapiGetVisibleDevicesEnv(l)
	default:
		slog.Debug("no filter required for library " + l[0].Library)
		return "", ""
	}
}
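
// Illustrative only: a minimal sketch of how a caller might combine
// GetGPUInfo and GetVisibleDevicesEnv to scope a runner subprocess to the
// detected devices, assuming the two return values are the environment
// variable name and its value. The exec wiring below is hypothetical and is
// not part of this package.
//
//	gpus := GetGPUInfo()
//	envKey, envVal := gpus.GetVisibleDevicesEnv()
//	cmd := exec.Command("/path/to/runner") // hypothetical runner binary
//	cmd.Env = os.Environ()
//	if envKey != "" {
//		cmd.Env = append(cmd.Env, envKey+"="+envVal)
//	}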