gpu.go

//go:build linux || windows

package gpu

/*
#cgo linux LDFLAGS: -lrt -lpthread -ldl -lstdc++ -lm
#cgo windows LDFLAGS: -lpthread

#include "gpu_info.h"
*/
import "C"

import (
	"fmt"
	"log/slog"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"unsafe"

	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
)
type cudaHandles struct {
	deviceCount int
	cudart      *C.cudart_handle_t
	nvcuda      *C.nvcuda_handle_t
	nvml        *C.nvml_handle_t
}

type oneapiHandles struct {
	oneapi      *C.oneapi_handle_t
	deviceCount int
}

const (
	cudaMinimumMemory = 457 * format.MebiByte
	rocmMinimumMemory = 457 * format.MebiByte
	// TODO OneAPI minimum memory
)

var (
	gpuMutex      sync.Mutex
	bootstrapped  bool
	cpuCapability CPUCapability
	cpus          []CPUInfo
	cudaGPUs      []CudaGPUInfo
	nvcudaLibPath string
	cudartLibPath string
	oneapiLibPath string
	nvmlLibPath   string
	rocmGPUs      []RocmGPUInfo
	oneapiGPUs    []OneapiGPUInfo
)
// With our current CUDA compile flags, anything older than compute capability 5.0 will not work properly
var CudaComputeMin = [2]C.int{5, 0}

var RocmComputeMin = 9

// TODO find a better way to detect iGPU instead of minimum memory
const IGPUMemLimit = 1 * format.GibiByte // 512M is what they typically report, so anything less than 1G must be iGPU

// Jetson devices have JETSON_JETPACK="x.y.z" factory set to the Jetpack version installed.
// Included to drive logic for reducing Ollama-allocated overhead on L4T/Jetson devices.
var CudaTegra string = os.Getenv("JETSON_JETPACK")
// Note: gpuMutex must already be held
func initCudaHandles() *cudaHandles {
	// TODO - if the ollama build is CPU only, don't do these checks as they're irrelevant and confusing

	cHandles := &cudaHandles{}
	// Short Circuit if we already know which library to use
	if nvmlLibPath != "" {
		cHandles.nvml, _ = LoadNVMLMgmt([]string{nvmlLibPath})
		return cHandles
	}
	if nvcudaLibPath != "" {
		cHandles.deviceCount, cHandles.nvcuda, _ = LoadNVCUDAMgmt([]string{nvcudaLibPath})
		return cHandles
	}
	if cudartLibPath != "" {
		cHandles.deviceCount, cHandles.cudart, _ = LoadCUDARTMgmt([]string{cudartLibPath})
		return cHandles
	}

	slog.Debug("searching for GPU discovery libraries for NVIDIA")
	var cudartMgmtPatterns []string

	// Aligned with driver, we can't carry as payloads
	nvcudaMgmtPatterns := NvcudaGlobs

	if runtime.GOOS == "windows" {
		localAppData := os.Getenv("LOCALAPPDATA")
		cudartMgmtPatterns = []string{filepath.Join(localAppData, "Programs", "Ollama", CudartMgmtName)}
	}
	tmpDir, _ := PayloadsDir()
	if tmpDir != "" {
		// TODO - add "payloads" for subprocess
		cudartMgmtPatterns = []string{filepath.Join(tmpDir, "cuda*", CudartMgmtName)}
	}
	cudartMgmtPatterns = append(cudartMgmtPatterns, CudartGlobs...)

	if len(NvmlGlobs) > 0 {
		nvmlLibPaths := FindGPULibs(NvmlMgmtName, NvmlGlobs)
		if len(nvmlLibPaths) > 0 {
			nvml, libPath := LoadNVMLMgmt(nvmlLibPaths)
			if nvml != nil {
				slog.Debug("nvidia-ml loaded", "library", libPath)
				cHandles.nvml = nvml
				nvmlLibPath = libPath
			}
		}
	}

	nvcudaLibPaths := FindGPULibs(NvcudaMgmtName, nvcudaMgmtPatterns)
	if len(nvcudaLibPaths) > 0 {
		deviceCount, nvcuda, libPath := LoadNVCUDAMgmt(nvcudaLibPaths)
		if nvcuda != nil {
			slog.Debug("detected GPUs", "count", deviceCount, "library", libPath)
			cHandles.nvcuda = nvcuda
			cHandles.deviceCount = deviceCount
			nvcudaLibPath = libPath
			return cHandles
		}
	}

	cudartLibPaths := FindGPULibs(CudartMgmtName, cudartMgmtPatterns)
	if len(cudartLibPaths) > 0 {
		deviceCount, cudart, libPath := LoadCUDARTMgmt(cudartLibPaths)
		if cudart != nil {
			slog.Debug("detected GPUs", "library", libPath, "count", deviceCount)
			cHandles.cudart = cudart
			cHandles.deviceCount = deviceCount
			cudartLibPath = libPath
			return cHandles
		}
	}

	return cHandles
}
// Note: gpuMutex must already be held
func initOneAPIHandles() *oneapiHandles {
	oHandles := &oneapiHandles{}

	// Short Circuit if we already know which library to use
	if oneapiLibPath != "" {
		oHandles.deviceCount, oHandles.oneapi, _ = LoadOneapiMgmt([]string{oneapiLibPath})
		return oHandles
	}

	oneapiLibPaths := FindGPULibs(OneapiMgmtName, OneapiGlobs)
	if len(oneapiLibPaths) > 0 {
		oHandles.deviceCount, oHandles.oneapi, oneapiLibPath = LoadOneapiMgmt(oneapiLibPaths)
	}

	return oHandles
}
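
// GetCPUInfo returns the CPU as a single-entry GpuInfoList, triggering full
// device discovery first if it has not been bootstrapped yet.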
func GetCPUInfo() GpuInfoList {
	gpuMutex.Lock()
	if !bootstrapped {
		gpuMutex.Unlock()
		GetGPUInfo()
	} else {
		gpuMutex.Unlock()
	}
	return GpuInfoList{cpus[0].GpuInfo}
}
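
// GetGPUInfo performs GPU discovery on first use (CUDA, oneAPI, and ROCm),
// and on subsequent calls refreshes the free memory figures for the devices
// already found. If no supported GPU is usable, the CPU entry is returned as
// the only device.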
func GetGPUInfo() GpuInfoList {
	// TODO - consider exploring lspci (and equivalent on windows) to check for
	// GPUs so we can report warnings if we see Nvidia/AMD but fail to load the libraries
	gpuMutex.Lock()
	defer gpuMutex.Unlock()
	needRefresh := true
	var cHandles *cudaHandles
	var oHandles *oneapiHandles
	defer func() {
		if cHandles != nil {
			if cHandles.cudart != nil {
				C.cudart_release(*cHandles.cudart)
			}
			if cHandles.nvcuda != nil {
				C.nvcuda_release(*cHandles.nvcuda)
			}
			if cHandles.nvml != nil {
				C.nvml_release(*cHandles.nvml)
			}
		}
		if oHandles != nil {
			if oHandles.oneapi != nil {
				// TODO - is this needed?
				C.oneapi_release(*oHandles.oneapi)
			}
		}
	}()

	if !bootstrapped {
		slog.Debug("Detecting GPUs")
		needRefresh = false
		cpuCapability = GetCPUCapability()
		var memInfo C.mem_info_t
		mem, err := GetCPUMem()
		if err != nil {
			slog.Warn("error looking up system memory", "error", err)
		}
		cpus = []CPUInfo{
			{
				GpuInfo: GpuInfo{
					memInfo: mem,
					Library: "cpu",
					Variant: cpuCapability,
					ID:      "0",
				},
			},
		}

		// Fallback to CPU mode if we're lacking required vector extensions on x86
		if cpuCapability < GPURunnerCPUCapability && runtime.GOARCH == "amd64" {
			slog.Warn("CPU does not have minimum vector extensions, GPU inference disabled", "required", GPURunnerCPUCapability, "detected", cpuCapability)
			bootstrapped = true
			// No need to do any GPU discovery, since we can't run on them
			return GpuInfoList{cpus[0].GpuInfo}
		}

		// On windows we bundle the nvidia library one level above the runner dir
		depPath := ""
		if runtime.GOOS == "windows" && envconfig.RunnersDir != "" {
			depPath = filepath.Dir(envconfig.RunnersDir)
		}

		// Load ALL libraries
		cHandles = initCudaHandles()

		// NVIDIA
		for i := range cHandles.deviceCount {
			if cHandles.cudart != nil || cHandles.nvcuda != nil {
				gpuInfo := CudaGPUInfo{
					GpuInfo: GpuInfo{
						Library: "cuda",
					},
					index: i,
				}
				var driverMajor int
				var driverMinor int
				if cHandles.cudart != nil {
					C.cudart_bootstrap(*cHandles.cudart, C.int(i), &memInfo)
				} else {
					C.nvcuda_bootstrap(*cHandles.nvcuda, C.int(i), &memInfo)
					driverMajor = int(cHandles.nvcuda.driver_major)
					driverMinor = int(cHandles.nvcuda.driver_minor)
				}
				if memInfo.err != nil {
					slog.Info("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err))
					C.free(unsafe.Pointer(memInfo.err))
					continue
				}
				if memInfo.major < CudaComputeMin[0] || (memInfo.major == CudaComputeMin[0] && memInfo.minor < CudaComputeMin[1]) {
					slog.Info(fmt.Sprintf("[%d] CUDA GPU is too old. Compute Capability detected: %d.%d", i, memInfo.major, memInfo.minor))
					continue
				}
				gpuInfo.TotalMemory = uint64(memInfo.total)
				gpuInfo.FreeMemory = uint64(memInfo.free)
				gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
				gpuInfo.Compute = fmt.Sprintf("%d.%d", memInfo.major, memInfo.minor)
				gpuInfo.MinimumMemory = cudaMinimumMemory
				gpuInfo.DependencyPath = depPath
				gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
				gpuInfo.DriverMajor = driverMajor
				gpuInfo.DriverMinor = driverMinor

				// TODO potentially sort on our own algorithm instead of what the underlying GPU library does...
				cudaGPUs = append(cudaGPUs, gpuInfo)
			}
		}
		// Intel
		if envconfig.IntelGpu {
			oHandles = initOneAPIHandles()
			if oHandles.oneapi == nil {
				// shouldn't happen, but guard before dereferencing the handle below
				slog.Warn("oneapi management library not loaded, skipping Intel GPU discovery")
			} else {
				for d := range oHandles.oneapi.num_drivers {
					devCount := C.oneapi_get_device_count(*oHandles.oneapi, C.int(d))
					for i := range devCount {
						gpuInfo := OneapiGPUInfo{
							GpuInfo: GpuInfo{
								Library: "oneapi",
							},
							driverIndex: int(d),
							gpuIndex:    int(i),
						}
						// TODO - split bootstrapping from updating free memory
						C.oneapi_check_vram(*oHandles.oneapi, C.int(d), i, &memInfo)
						// TODO - convert this to MinimumMemory based on testing...
						totalFreeMem := float64(memInfo.free) * 0.95 // work-around: leave some reserve vram for mkl lib used in ggml-sycl backend.
						memInfo.free = C.uint64_t(totalFreeMem)
						gpuInfo.TotalMemory = uint64(memInfo.total)
						gpuInfo.FreeMemory = uint64(memInfo.free)
						gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
						gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
						// TODO dependency path?
						oneapiGPUs = append(oneapiGPUs, gpuInfo)
					}
				}
			}
		}
		rocmGPUs = AMDGetGPUInfo()
		bootstrapped = true
	}

	// For detected GPUs, load library if not loaded

	// Refresh free memory usage
	if needRefresh {
		mem, err := GetCPUMem()
		if err != nil {
			slog.Warn("error looking up system memory", "error", err)
		} else {
			slog.Debug("updating system memory data",
				slog.Group(
					"before",
					"total", format.HumanBytes2(cpus[0].TotalMemory),
					"free", format.HumanBytes2(cpus[0].FreeMemory),
				),
				slog.Group(
					"now",
					"total", format.HumanBytes2(mem.TotalMemory),
					"free", format.HumanBytes2(mem.FreeMemory),
				),
			)
			cpus[0].FreeMemory = mem.FreeMemory
		}

		var memInfo C.mem_info_t
		if cHandles == nil && len(cudaGPUs) > 0 {
			cHandles = initCudaHandles()
		}
		for i, gpu := range cudaGPUs {
			if cHandles.nvml != nil {
				C.nvml_get_free(*cHandles.nvml, C.int(gpu.index), &memInfo.free, &memInfo.total, &memInfo.used)
			} else if cHandles.cudart != nil {
				C.cudart_bootstrap(*cHandles.cudart, C.int(gpu.index), &memInfo)
			} else if cHandles.nvcuda != nil {
				C.nvcuda_get_free(*cHandles.nvcuda, C.int(gpu.index), &memInfo.free, &memInfo.total)
				memInfo.used = memInfo.total - memInfo.free
			} else {
				// shouldn't happen
				slog.Warn("no valid cuda library loaded to refresh vram usage")
				break
			}
			if memInfo.err != nil {
				slog.Warn("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err))
				C.free(unsafe.Pointer(memInfo.err))
				continue
			}
			if memInfo.free == 0 {
				slog.Warn("error looking up nvidia GPU memory")
				continue
			}
			slog.Debug("updating cuda memory data",
				"gpu", gpu.ID,
				"name", gpu.Name,
				slog.Group(
					"before",
					"total", format.HumanBytes2(gpu.TotalMemory),
					"free", format.HumanBytes2(gpu.FreeMemory),
				),
				slog.Group(
					"now",
					"total", format.HumanBytes2(uint64(memInfo.total)),
					"free", format.HumanBytes2(uint64(memInfo.free)),
					"used", format.HumanBytes2(uint64(memInfo.used)),
				),
			)
			cudaGPUs[i].FreeMemory = uint64(memInfo.free)
		}

		if oHandles == nil && len(oneapiGPUs) > 0 {
			oHandles = initOneAPIHandles()
		}
		for i, gpu := range oneapiGPUs {
			if oHandles.oneapi == nil {
				// shouldn't happen
				slog.Warn("nil oneapi handle with device count", "count", oHandles.deviceCount)
				continue
			}
			C.oneapi_check_vram(*oHandles.oneapi, C.int(gpu.driverIndex), C.int(gpu.gpuIndex), &memInfo)
			// TODO - convert this to MinimumMemory based on testing...
			totalFreeMem := float64(memInfo.free) * 0.95 // work-around: leave some reserve vram for mkl lib used in ggml-sycl backend.
			memInfo.free = C.uint64_t(totalFreeMem)
			oneapiGPUs[i].FreeMemory = uint64(memInfo.free)
		}

		err = RocmGPUInfoList(rocmGPUs).RefreshFreeMemory()
		if err != nil {
			slog.Debug("problem refreshing ROCm free memory", "error", err)
		}
	}

	resp := []GpuInfo{}
	for _, gpu := range cudaGPUs {
		resp = append(resp, gpu.GpuInfo)
	}
	for _, gpu := range rocmGPUs {
		resp = append(resp, gpu.GpuInfo)
	}
	for _, gpu := range oneapiGPUs {
		resp = append(resp, gpu.GpuInfo)
	}
	if len(resp) == 0 {
		resp = append(resp, cpus[0].GpuInfo)
	}
	return resp
}
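
// FindGPULibs returns candidate paths for the named GPU management library,
// combining PATH/LD_LIBRARY_PATH entries with the supplied default glob
// patterns and de-duplicating matches after resolving symlinks.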
func FindGPULibs(baseLibName string, defaultPatterns []string) []string {
	// Multiple GPU libraries may exist, and some may not work, so keep trying until we exhaust them
	var ldPaths []string
	var patterns []string
	gpuLibPaths := []string{}
	slog.Debug("Searching for GPU library", "name", baseLibName)

	switch runtime.GOOS {
	case "windows":
		ldPaths = strings.Split(os.Getenv("PATH"), ";")
	case "linux":
		ldPaths = strings.Split(os.Getenv("LD_LIBRARY_PATH"), ":")
	default:
		return gpuLibPaths
	}

	// Start with whatever we find in the PATH/LD_LIBRARY_PATH
	for _, ldPath := range ldPaths {
		d, err := filepath.Abs(ldPath)
		if err != nil {
			continue
		}
		patterns = append(patterns, filepath.Join(d, baseLibName+"*"))
	}
	patterns = append(patterns, defaultPatterns...)
	slog.Debug("gpu library search", "globs", patterns)
	for _, pattern := range patterns {
		// Nvidia PhysX known to return bogus results
		if strings.Contains(pattern, "PhysX") {
			slog.Debug("skipping PhysX cuda library path", "path", pattern)
			continue
		}
		// Ignore glob discovery errors
		matches, _ := filepath.Glob(pattern)
		for _, match := range matches {
			// Resolve any links so we don't try the same lib multiple times
			// and weed out any dups across globs
			libPath := match
			tmp := match
			var err error
			for ; err == nil; tmp, err = os.Readlink(libPath) {
				if !filepath.IsAbs(tmp) {
					tmp = filepath.Join(filepath.Dir(libPath), tmp)
				}
				libPath = tmp
			}
			new := true
			for _, cmp := range gpuLibPaths {
				if cmp == libPath {
					new = false
					break
				}
			}
			if new {
				gpuLibPaths = append(gpuLibPaths, libPath)
			}
		}
	}
	slog.Debug("discovered GPU libraries", "paths", gpuLibPaths)
	return gpuLibPaths
}
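
// LoadCUDARTMgmt tries each candidate CUDA runtime library in turn and
// returns the device count, handle, and library path of the first that loads.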
func LoadCUDARTMgmt(cudartLibPaths []string) (int, *C.cudart_handle_t, string) {
	var resp C.cudart_init_resp_t
	resp.ch.verbose = getVerboseState()
	for _, libPath := range cudartLibPaths {
		lib := C.CString(libPath)
		defer C.free(unsafe.Pointer(lib))
		C.cudart_init(lib, &resp)
		if resp.err != nil {
			slog.Debug("Unable to load cudart", "library", libPath, "error", C.GoString(resp.err))
			C.free(unsafe.Pointer(resp.err))
		} else {
			return int(resp.num_devices), &resp.ch, libPath
		}
	}
	return 0, nil, ""
}
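
// LoadNVCUDAMgmt tries each candidate CUDA driver library in turn and
// returns the device count, handle, and library path of the first that loads.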
func LoadNVCUDAMgmt(nvcudaLibPaths []string) (int, *C.nvcuda_handle_t, string) {
	var resp C.nvcuda_init_resp_t
	resp.ch.verbose = getVerboseState()
	for _, libPath := range nvcudaLibPaths {
		lib := C.CString(libPath)
		defer C.free(unsafe.Pointer(lib))
		C.nvcuda_init(lib, &resp)
		if resp.err != nil {
			slog.Debug("Unable to load nvcuda", "library", libPath, "error", C.GoString(resp.err))
			C.free(unsafe.Pointer(resp.err))
		} else {
			return int(resp.num_devices), &resp.ch, libPath
		}
	}
	return 0, nil, ""
}
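
// LoadNVMLMgmt tries each candidate NVML library in turn and returns the
// handle and library path of the first that loads.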
func LoadNVMLMgmt(nvmlLibPaths []string) (*C.nvml_handle_t, string) {
	var resp C.nvml_init_resp_t
	resp.ch.verbose = getVerboseState()
	for _, libPath := range nvmlLibPaths {
		lib := C.CString(libPath)
		defer C.free(unsafe.Pointer(lib))
		C.nvml_init(lib, &resp)
		if resp.err != nil {
			slog.Info(fmt.Sprintf("Unable to load NVML management library %s: %s", libPath, C.GoString(resp.err)))
			C.free(unsafe.Pointer(resp.err))
		} else {
			return &resp.ch, libPath
		}
	}
	return nil, ""
}
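
// LoadOneapiMgmt tries each candidate oneAPI management library in turn and
// returns the total device count across all drivers, the handle, and the
// library path of the first that loads.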
func LoadOneapiMgmt(oneapiLibPaths []string) (int, *C.oneapi_handle_t, string) {
	var resp C.oneapi_init_resp_t
	num_devices := 0
	resp.oh.verbose = getVerboseState()
	for _, libPath := range oneapiLibPaths {
		lib := C.CString(libPath)
		defer C.free(unsafe.Pointer(lib))
		C.oneapi_init(lib, &resp)
		if resp.err != nil {
			slog.Debug("Unable to load oneAPI management library", "library", libPath, "error", C.GoString(resp.err))
			C.free(unsafe.Pointer(resp.err))
		} else {
			for i := range resp.oh.num_drivers {
				num_devices += int(C.oneapi_get_device_count(resp.oh, C.int(i)))
			}
			return num_devices, &resp.oh, libPath
		}
	}
	return 0, nil, ""
}
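
// getVerboseState converts the envconfig debug setting into the C flag used
// by the native discovery helpers (1 for verbose, 0 otherwise).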
func getVerboseState() C.uint16_t {
	if envconfig.Debug {
		return C.uint16_t(1)
	}
	return C.uint16_t(0)
}
// Given the list of GPUs this instantiation is targeted for,
// figure out the visible devices environment variable.
//
// If different libraries are detected, the first one is what we use.
func (l GpuInfoList) GetVisibleDevicesEnv() (string, string) {
	if len(l) == 0 {
		return "", ""
	}
	switch l[0].Library {
	case "cuda":
		return cudaGetVisibleDevicesEnv(l)
	case "rocm":
		return rocmGetVisibleDevicesEnv(l)
	case "oneapi":
		return oneapiGetVisibleDevicesEnv(l)
	default:
		slog.Debug("no filter required for library " + l[0].Library)
		return "", ""
	}
}