gpu_info_cuda.c

#ifndef __APPLE__  // TODO - maybe consider nvidia support on intel macs?

#include "gpu_info_cuda.h"
#include <string.h>
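
// Dynamically load the NVML library at cuda_lib_path, resolve the entry
// points listed below, and call nvmlInit_v2. On failure, resp->err is set to
// a heap-allocated message and resp->ch.handle is left NULL.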
void cuda_init(char *cuda_lib_path, cuda_init_resp_t *resp) {
  nvmlReturn_t ret;
  resp->err = NULL;
  const int buflen = 256;
  char buf[buflen + 1];
  int i;

  struct lookup {
    char *s;
    void **p;
  } l[] = {
      {"nvmlInit_v2", (void *)&resp->ch.nvmlInit_v2},
      {"nvmlShutdown", (void *)&resp->ch.nvmlShutdown},
      {"nvmlDeviceGetHandleByIndex", (void *)&resp->ch.nvmlDeviceGetHandleByIndex},
      {"nvmlDeviceGetMemoryInfo", (void *)&resp->ch.nvmlDeviceGetMemoryInfo},
      {"nvmlDeviceGetCount_v2", (void *)&resp->ch.nvmlDeviceGetCount_v2},
      {"nvmlDeviceGetCudaComputeCapability", (void *)&resp->ch.nvmlDeviceGetCudaComputeCapability},
      {"nvmlSystemGetDriverVersion", (void *)&resp->ch.nvmlSystemGetDriverVersion},
      {"nvmlDeviceGetName", (void *)&resp->ch.nvmlDeviceGetName},
      {"nvmlDeviceGetSerial", (void *)&resp->ch.nvmlDeviceGetSerial},
      {"nvmlDeviceGetVbiosVersion", (void *)&resp->ch.nvmlDeviceGetVbiosVersion},
      {"nvmlDeviceGetBoardPartNumber", (void *)&resp->ch.nvmlDeviceGetBoardPartNumber},
      {"nvmlDeviceGetBrand", (void *)&resp->ch.nvmlDeviceGetBrand},
      {NULL, NULL},
  };

  resp->ch.handle = LOAD_LIBRARY(cuda_lib_path, RTLD_LAZY);
  if (!resp->ch.handle) {
    char *msg = LOAD_ERR();
    LOG(resp->ch.verbose, "library %s load err: %s\n", cuda_lib_path, msg);
    snprintf(buf, buflen,
             "Unable to load %s library to query for Nvidia GPUs: %s",
             cuda_lib_path, msg);
    free(msg);
    resp->err = strdup(buf);
    return;
  }

  // TODO once we've squashed the remaining corner cases remove this log
  LOG(resp->ch.verbose, "wiring nvidia management library functions in %s\n", cuda_lib_path);
  for (i = 0; l[i].s != NULL; i++) {
    // TODO once we've squashed the remaining corner cases remove this log
    LOG(resp->ch.verbose, "dlsym: %s\n", l[i].s);
    *l[i].p = LOAD_SYMBOL(resp->ch.handle, l[i].s);
    if (!*l[i].p) {  // check the resolved symbol, not the address of the slot
      char *msg = LOAD_ERR();
      LOG(resp->ch.verbose, "dlerr: %s\n", msg);
      UNLOAD_LIBRARY(resp->ch.handle);
      resp->ch.handle = NULL;
      snprintf(buf, buflen, "symbol lookup for %s failed: %s", l[i].s, msg);
      free(msg);
      resp->err = strdup(buf);
      return;
    }
  }

  ret = (*resp->ch.nvmlInit_v2)();
  if (ret != NVML_SUCCESS) {
    LOG(resp->ch.verbose, "nvmlInit_v2 err: %d\n", ret);
    UNLOAD_LIBRARY(resp->ch.handle);
    resp->ch.handle = NULL;
    snprintf(buf, buflen, "nvml vram init failure: %d", ret);
    resp->err = strdup(buf);
    return;
  }

  // Report driver version if we're in verbose mode, ignore errors
  ret = (*resp->ch.nvmlSystemGetDriverVersion)(buf, buflen);
  if (ret != NVML_SUCCESS) {
    LOG(resp->ch.verbose, "nvmlSystemGetDriverVersion failed: %d\n", ret);
  } else {
    LOG(resp->ch.verbose, "CUDA driver version: %s\n", buf);
  }
}
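
// Sum total and free VRAM across all NVML devices into resp. In verbose
// mode, also log per-device name, part number, serial, VBIOS version and
// brand, ignoring errors from those informational calls.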
void cuda_check_vram(cuda_handle_t h, mem_info_t *resp) {
  resp->err = NULL;
  nvmlDevice_t device;
  nvmlMemory_t memInfo = {0};
  nvmlReturn_t ret;
  const int buflen = 256;
  char buf[buflen + 1];
  int i;

  if (h.handle == NULL) {
    resp->err = strdup("nvml handle not initialized");
    return;
  }

  ret = (*h.nvmlDeviceGetCount_v2)(&resp->count);
  if (ret != NVML_SUCCESS) {
    snprintf(buf, buflen, "unable to get device count: %d", ret);
    resp->err = strdup(buf);
    return;
  }

  resp->total = 0;
  resp->free = 0;
  for (i = 0; i < resp->count; i++) {
    ret = (*h.nvmlDeviceGetHandleByIndex)(i, &device);
    if (ret != NVML_SUCCESS) {
      snprintf(buf, buflen, "unable to get device handle %d: %d", i, ret);
      resp->err = strdup(buf);
      return;
    }

    ret = (*h.nvmlDeviceGetMemoryInfo)(device, &memInfo);
    if (ret != NVML_SUCCESS) {
      snprintf(buf, buflen, "device memory info lookup failure %d: %d", i, ret);
      resp->err = strdup(buf);
      return;
    }

    if (h.verbose) {
      nvmlBrandType_t brand = 0;
      // When in verbose mode, report more information about
      // the card we discover, but don't fail on error
      ret = (*h.nvmlDeviceGetName)(device, buf, buflen);
      if (ret != NVML_SUCCESS) {
        LOG(h.verbose, "nvmlDeviceGetName failed: %d\n", ret);
      } else {
        LOG(h.verbose, "[%d] CUDA device name: %s\n", i, buf);
      }
      ret = (*h.nvmlDeviceGetBoardPartNumber)(device, buf, buflen);
      if (ret != NVML_SUCCESS) {
        LOG(h.verbose, "nvmlDeviceGetBoardPartNumber failed: %d\n", ret);
      } else {
        LOG(h.verbose, "[%d] CUDA part number: %s\n", i, buf);
      }
      ret = (*h.nvmlDeviceGetSerial)(device, buf, buflen);
      if (ret != NVML_SUCCESS) {
        LOG(h.verbose, "nvmlDeviceGetSerial failed: %d\n", ret);
      } else {
        LOG(h.verbose, "[%d] CUDA S/N: %s\n", i, buf);
      }
      ret = (*h.nvmlDeviceGetVbiosVersion)(device, buf, buflen);
      if (ret != NVML_SUCCESS) {
        LOG(h.verbose, "nvmlDeviceGetVbiosVersion failed: %d\n", ret);
      } else {
        LOG(h.verbose, "[%d] CUDA vbios version: %s\n", i, buf);
      }
      ret = (*h.nvmlDeviceGetBrand)(device, &brand);
      if (ret != NVML_SUCCESS) {
        LOG(h.verbose, "nvmlDeviceGetBrand failed: %d\n", ret);
      } else {
        LOG(h.verbose, "[%d] CUDA brand: %d\n", i, brand);
      }
    }

    LOG(h.verbose, "[%d] CUDA totalMem %llu\n", i, memInfo.total);
    LOG(h.verbose, "[%d] CUDA freeMem %llu\n", i, memInfo.free);

    resp->total += memInfo.total;
    resp->free += memInfo.free;
  }
}
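
// Report the lowest CUDA compute capability (major.minor) found across all
// devices, since the least capable GPU is what limits compatibility.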
void cuda_compute_capability(cuda_handle_t h, cuda_compute_capability_t *resp) {
  resp->err = NULL;
  resp->major = 0;
  resp->minor = 0;
  nvmlDevice_t device;
  int major = 0;
  int minor = 0;
  nvmlReturn_t ret;
  const int buflen = 256;
  char buf[buflen + 1];
  int i;

  if (h.handle == NULL) {
    resp->err = strdup("nvml handle not initialized");
    return;
  }

  unsigned int devices;
  ret = (*h.nvmlDeviceGetCount_v2)(&devices);
  if (ret != NVML_SUCCESS) {
    snprintf(buf, buflen, "unable to get device count: %d", ret);
    resp->err = strdup(buf);
    return;
  }

  for (i = 0; i < devices; i++) {
    ret = (*h.nvmlDeviceGetHandleByIndex)(i, &device);
    if (ret != NVML_SUCCESS) {
      snprintf(buf, buflen, "unable to get device handle %d: %d", i, ret);
      resp->err = strdup(buf);
      return;
    }

    ret = (*h.nvmlDeviceGetCudaComputeCapability)(device, &major, &minor);
    if (ret != NVML_SUCCESS) {
      snprintf(buf, buflen, "device compute capability lookup failure %d: %d", i, ret);
      resp->err = strdup(buf);
      return;
    }

    // Report the lowest major.minor we detect as that limits our compatibility
    if (resp->major == 0 || resp->major > major) {
      resp->major = major;
      resp->minor = minor;
    } else if (resp->major == major && resp->minor > minor) {
      resp->minor = minor;
    }
  }
}

#endif  // __APPLE__