// Package discover holds the types describing the system's inference
// hardware (CPUs and GPUs) gathered during hardware discovery.
  1. package discover
  2. import (
  3. "fmt"
  4. "log/slog"
  5. "github.com/ollama/ollama/format"
  6. )
// memInfo holds memory capacity/availability figures shared by both
// system (CPU) and GPU descriptions. All values are in bytes.
type memInfo struct {
	TotalMemory uint64 `json:"total_memory,omitempty"` // total installed memory
	FreeMemory  uint64 `json:"free_memory,omitempty"`  // memory currently available
	FreeSwap    uint64 `json:"free_swap,omitempty"`    // TODO split this out for system only
}
// Beginning of an `ollama info` command
//
// GpuInfo describes a single inference-capable device: which runner
// library drives it, how much memory it has, and identifying details.
type GpuInfo struct { // TODO better name maybe "InferenceProcessor"?
	memInfo // embedded total/free memory figures for this device

	// Library is the runner library used to drive this device
	// (e.g. "cuda", "rocm", "metal", "oneapi", "cpu").
	Library string `json:"library,omitempty"`

	// Optional variant to select (e.g. versions, cpu feature flags)
	Variant string `json:"variant"`

	// MinimumMemory represents the minimum memory required to use the GPU
	MinimumMemory uint64 `json:"-"`

	// Any extra PATH/LD_LIBRARY_PATH dependencies required for the Library to operate properly
	DependencyPath []string `json:"lib_path,omitempty"`

	// Extra environment variables specific to the GPU as list of [key,value]
	EnvWorkarounds [][2]string `json:"envs,omitempty"`

	// Set to true if we can NOT reliably discover FreeMemory. A value of true indicates
	// the FreeMemory is best effort, and may over or under report actual memory usage
	// False indicates FreeMemory can generally be trusted on this GPU
	UnreliableFreeMemory bool

	// GPU information
	ID      string `json:"gpu_id"`  // string to use for selection of this specific GPU
	Name    string `json:"name"`    // user friendly name if available
	Compute string `json:"compute"` // Compute Capability or gfx

	// Driver Information - TODO no need to put this on each GPU
	DriverMajor int `json:"driver_major,omitempty"`
	DriverMinor int `json:"driver_minor,omitempty"`

	// TODO other performance capability info to help in scheduling decisions
}
  37. func (gpu GpuInfo) RunnerName() string {
  38. if gpu.Variant != "" {
  39. return gpu.Library + "_" + gpu.Variant
  40. }
  41. return gpu.Library
  42. }
// CPUInfo describes the host CPU as an inference device. It embeds
// GpuInfo so the CPU can participate in the same scheduling paths as
// GPUs, plus the per-socket CPU package details.
type CPUInfo struct {
	GpuInfo
	CPUs []CPU // one entry per physical CPU package (socket)
}
// CPU type represents a CPU Package occupying a socket
// The `cpuinfo` tags name the /proc/cpuinfo keys the fields are
// populated from — presumably by a reflection-based parser elsewhere
// in the package; verify against the discovery code.
type CPU struct {
	ID                  string `cpuinfo:"processor"`
	VendorID            string `cpuinfo:"vendor_id"`
	ModelName           string `cpuinfo:"model name"`
	CoreCount           int // total physical cores in this package
	EfficiencyCoreCount int // Performance = CoreCount - Efficiency
	ThreadCount         int // hardware threads (with SMT/hyperthreading)
}
// CudaGPUInfo extends GpuInfo with CUDA-specific discovery state.
type CudaGPUInfo struct {
	GpuInfo
	OSOverhead uint64 // Memory overhead between the driver library and management library

	// unexported bookkeeping used during discovery
	index        int //nolint:unused,nolintlint
	computeMajor int //nolint:unused,nolintlint
	computeMinor int //nolint:unused,nolintlint
}

// CudaGPUInfoList is a collection of CUDA GPUs.
type CudaGPUInfoList []CudaGPUInfo
// RocmGPUInfo extends GpuInfo with ROCm-specific discovery state.
type RocmGPUInfo struct {
	GpuInfo
	usedFilepath string //nolint:unused,nolintlint
	index        int    //nolint:unused,nolintlint
}

// RocmGPUInfoList is a collection of ROCm GPUs.
type RocmGPUInfoList []RocmGPUInfo
// OneapiGPUInfo extends GpuInfo with oneAPI-specific discovery state.
type OneapiGPUInfo struct {
	GpuInfo
	driverIndex int //nolint:unused,nolintlint
	gpuIndex    int //nolint:unused,nolintlint
}

// OneapiGPUInfoList is a collection of oneAPI GPUs.
type OneapiGPUInfoList []OneapiGPUInfo

// GpuInfoList is a generic collection of inference devices.
type GpuInfoList []GpuInfo
// UnsupportedGPUInfo describes a discovered GPU that cannot be used,
// along with a human-readable reason for reporting purposes.
type UnsupportedGPUInfo struct {
	GpuInfo
	Reason string `json:"reason"`
}
  81. // Split up the set of gpu info's by Library and variant
  82. func (l GpuInfoList) ByLibrary() []GpuInfoList {
  83. resp := []GpuInfoList{}
  84. libs := []string{}
  85. for _, info := range l {
  86. found := false
  87. requested := info.Library
  88. if info.Variant != "" {
  89. requested += "_" + info.Variant
  90. }
  91. for i, lib := range libs {
  92. if lib == requested {
  93. resp[i] = append(resp[i], info)
  94. found = true
  95. break
  96. }
  97. }
  98. if !found {
  99. libs = append(libs, requested)
  100. resp = append(resp, []GpuInfo{info})
  101. }
  102. }
  103. return resp
  104. }
  105. // Report the GPU information into the log an Info level
  106. func (l GpuInfoList) LogDetails() {
  107. for _, g := range l {
  108. slog.Info("inference compute",
  109. "id", g.ID,
  110. "library", g.Library,
  111. "variant", g.Variant,
  112. "compute", g.Compute,
  113. "driver", fmt.Sprintf("%d.%d", g.DriverMajor, g.DriverMinor),
  114. "name", g.Name,
  115. "total", format.HumanBytes2(g.TotalMemory),
  116. "available", format.HumanBytes2(g.FreeMemory),
  117. )
  118. }
  119. }
// Sort by Free Space
// ByFreeMemory implements sort.Interface, ordering GPUs by ascending
// free memory.
type ByFreeMemory []GpuInfo

func (a ByFreeMemory) Len() int           { return len(a) }
func (a ByFreeMemory) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByFreeMemory) Less(i, j int) bool { return a[i].FreeMemory < a[j].FreeMemory }
// SystemInfo is the aggregate result of hardware discovery: the host
// CPU, the usable GPUs, and anything that was found but rejected.
type SystemInfo struct {
	System          CPUInfo              `json:"system"`
	GPUs            []GpuInfo            `json:"gpus"`
	UnsupportedGPUs []UnsupportedGPUInfo `json:"unsupported_gpus"`
	DiscoveryErrors []string             `json:"discovery_errors"`
}
  131. // Return the optimal number of threads to use for inference
  132. func (si SystemInfo) GetOptimalThreadCount() int {
  133. if len(si.System.CPUs) == 0 {
  134. return 0
  135. }
  136. coreCount := 0
  137. for _, c := range si.System.CPUs {
  138. coreCount += c.CoreCount - c.EfficiencyCoreCount
  139. }
  140. return coreCount
  141. }
  142. // For each GPU, check if it does NOT support flash attention
  143. func (l GpuInfoList) FlashAttentionSupported() bool {
  144. for _, gpu := range l {
  145. supportsFA := gpu.Library == "metal" ||
  146. (gpu.Library == "cuda" && gpu.DriverMajor >= 7) ||
  147. gpu.Library == "rocm"
  148. if !supportsFA {
  149. return false
  150. }
  151. }
  152. return true
  153. }