// llama-util.h
  1. /**
  2. * llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
  3. *
  4. * MIT License
  5. *
  6. * Copyright (c) 2023 Georgi Gerganov
  7. *
  8. * Permission is hereby granted, free of charge, to any person obtaining a copy
  9. * of this software and associated documentation files (the "Software"), to deal
  10. * in the Software without restriction, including without limitation the rights
  11. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  12. * copies of the Software, and to permit persons to whom the Software is
  13. * furnished to do so, subject to the following conditions:
  14. *
  15. * The above copyright notice and this permission notice shall be included in all
  16. * copies or substantial portions of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  21. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  22. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  23. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  24. * SOFTWARE.
  25. */
  26. // Internal header to be included only by llama.cpp.
  27. // Contains wrappers around OS interfaces.
  28. #ifndef LLAMA_UTIL_H
  29. #define LLAMA_UTIL_H
#include <algorithm>
#include <cerrno>
#include <climits>
#include <cstdarg>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <stdexcept>
#include <string>
#include <vector>
  40. #ifdef __has_include
  41. #if __has_include(<unistd.h>)
  42. #include <unistd.h>
  43. #if defined(_POSIX_MAPPED_FILES)
  44. #include <sys/mman.h>
  45. #endif
  46. #if defined(_POSIX_MEMLOCK_RANGE)
  47. #include <sys/resource.h>
  48. #endif
  49. #endif
  50. #endif
  51. #if defined(_WIN32)
  52. #define WIN32_LEAN_AND_MEAN
  53. #ifndef NOMINMAX
  54. #define NOMINMAX
  55. #endif
  56. #include <windows.h>
  57. #include <io.h>
  58. #include <stdio.h> // for _fseeki64
  59. #endif
  60. #define LLAMA_ASSERT(x) \
  61. do { \
  62. if (!(x)) { \
  63. fprintf(stderr, "LLAMA_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
  64. abort(); \
  65. } \
  66. } while (0)
  67. #ifdef __GNUC__
  68. #ifdef __MINGW32__
  69. __attribute__((format(gnu_printf, 1, 2)))
  70. #else
  71. __attribute__((format(printf, 1, 2)))
  72. #endif
  73. #endif
  74. static std::string format(const char * fmt, ...) {
  75. va_list ap, ap2;
  76. va_start(ap, fmt);
  77. va_copy(ap2, ap);
  78. int size = vsnprintf(NULL, 0, fmt, ap);
  79. LLAMA_ASSERT(size >= 0 && size < INT_MAX);
  80. std::vector<char> buf(size + 1);
  81. int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
  82. LLAMA_ASSERT(size2 == size);
  83. va_end(ap2);
  84. va_end(ap);
  85. return std::string(buf.data(), size);
  86. }
// RAII wrapper around a stdio FILE * with exact-length read/write helpers.
// All I/O errors are reported by throwing std::runtime_error.
struct llama_file {
    // use FILE * so we don't have to re-open the file to mmap
    FILE * fp;
    size_t size;  // total file size in bytes, measured once at construction

    // Opens `fname` with the given stdio `mode`; throws std::runtime_error
    // (including strerror detail) if the file cannot be opened.
    llama_file(const char * fname, const char * mode) {
        fp = std::fopen(fname, mode);
        if (fp == NULL) {
            throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
        }
        // Measure the file size by seeking to the end, then rewind.
        seek(0, SEEK_END);
        size = tell();
        seek(0, SEEK_SET);
    }

    // Current file offset. Uses the 64-bit variant on Windows so files
    // larger than 2 GiB report correctly.
    size_t tell() const {
#ifdef _WIN32
        __int64 ret = _ftelli64(fp);
#else
        long ret = std::ftell(fp);
#endif
        LLAMA_ASSERT(ret != -1); // this really shouldn't fail
        return (size_t) ret;
    }

    // Reposition the file offset; `whence` is SEEK_SET/SEEK_CUR/SEEK_END.
    void seek(size_t offset, int whence) {
#ifdef _WIN32
        int ret = _fseeki64(fp, (__int64) offset, whence);
#else
        int ret = std::fseek(fp, (long) offset, whence);
#endif
        LLAMA_ASSERT(ret == 0); // same
    }

    // Read exactly `len` bytes into `ptr`. Throws on an I/O error, or if
    // the file ends before `len` bytes are available. Zero-length reads
    // are a no-op.
    void read_raw(void * ptr, size_t len) const {
        if (len == 0) {
            return;
        }
        errno = 0;
        // One element of `len` bytes: ret == 1 only if the full span was read.
        std::size_t ret = std::fread(ptr, len, 1, fp);
        if (ferror(fp)) {
            throw std::runtime_error(format("read error: %s", strerror(errno)));
        }
        if (ret != 1) {
            throw std::runtime_error(std::string("unexpectedly reached end of file"));
        }
    }

    // Read a 32-bit unsigned integer (raw bytes, host byte order).
    std::uint32_t read_u32() {
        std::uint32_t ret;
        read_raw(&ret, sizeof(ret));
        return ret;
    }

    // Read `len` bytes and return them as a std::string (may contain NULs).
    std::string read_string(std::uint32_t len) {
        std::vector<char> chars(len);
        read_raw(chars.data(), len);
        return std::string(chars.data(), len);
    }

    // Write exactly `len` bytes from `ptr`; throws on failure.
    // Zero-length writes are a no-op.
    void write_raw(const void * ptr, size_t len) const {
        if (len == 0) {
            return;
        }
        errno = 0;
        size_t ret = std::fwrite(ptr, len, 1, fp);
        if (ret != 1) {
            throw std::runtime_error(format("write error: %s", strerror(errno)));
        }
    }

    // Write a 32-bit unsigned integer (raw bytes, host byte order).
    void write_u32(std::uint32_t val) {
        write_raw(&val, sizeof(val));
    }

    ~llama_file() {
        if (fp) {
            std::fclose(fp);
        }
    }
};
// llama_data_context
  160. struct llama_data_context {
  161. virtual void write(const void * src, size_t size) = 0;
  162. virtual size_t get_size_written() = 0;
  163. virtual ~llama_data_context() = default;
  164. };
  165. struct llama_data_buffer_context : llama_data_context {
  166. uint8_t* ptr;
  167. size_t size_written = 0;
  168. llama_data_buffer_context(uint8_t * p) : ptr(p) {}
  169. void write(const void * src, size_t size) override {
  170. memcpy(ptr, src, size);
  171. ptr += size;
  172. size_written += size;
  173. }
  174. size_t get_size_written() override {
  175. return size_written;
  176. }
  177. };
// Sink that streams serialized state directly to an open llama_file.
struct llama_data_file_context : llama_data_context {
    llama_file* file;        // non-owning; must outlive this context
    size_t size_written = 0; // running byte total

    llama_data_file_context(llama_file * f) : file(f) {}

    // Append `size` bytes to the file (llama_file::write_raw throws on error).
    void write(const void * src, size_t size) override {
        file->write_raw(src, size);
        size_written += size;
    }

    size_t get_size_written() override {
        return size_written;
    }
};
  190. #if defined(_WIN32)
  191. static std::string llama_format_win_err(DWORD err) {
  192. LPSTR buf;
  193. size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
  194. NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
  195. if (!size) {
  196. return "FormatMessageA failed";
  197. }
  198. std::string ret(buf, size);
  199. LocalFree(buf);
  200. return ret;
  201. }
  202. #endif
// Read-only memory mapping of an entire llama_file; unmapped on destruction.
// SUPPORTED reports whether mapping is available on this platform.
struct llama_mmap {
    void * addr;  // start of the mapping
    size_t size;  // length of the mapping in bytes (== file size)

    llama_mmap(const llama_mmap &) = delete;  // owns the mapping; non-copyable

#ifdef _POSIX_MAPPED_FILES
    static constexpr bool SUPPORTED = true;

    // Map the whole file. `prefetch` is the number of bytes to ask the kernel
    // to preload ((size_t)-1 == everything); `numa` disables prefetch and
    // readahead entirely.
    llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
        size = file->size;
        int fd = fileno(file->fp);
        int flags = MAP_SHARED;
        // prefetch/readahead impairs performance on NUMA systems
        if (numa) { prefetch = 0; }
#ifdef __linux__
        // pre-fault the whole mapping when everything is to be prefetched
        if (prefetch >= file->size) { flags |= MAP_POPULATE; }
#endif
        addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
        if (addr == MAP_FAILED) {
            throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
        }
        if (prefetch > 0) {
            // Advise the kernel to preload the mapped memory
            if (madvise(addr, std::min(file->size, prefetch), MADV_WILLNEED)) {
                fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n",
                        strerror(errno));
            }
        }
        if (numa) {
            // advise the kernel not to use readahead
            // (because the next page might not belong on the same node)
            if (madvise(addr, file->size, MADV_RANDOM)) {
                fprintf(stderr, "warning: madvise(.., MADV_RANDOM) failed: %s\n",
                        strerror(errno));
            }
        }
    }

    ~llama_mmap() {
        munmap(addr, size);
    }
#elif defined(_WIN32)
    static constexpr bool SUPPORTED = true;

    // Windows variant: CreateFileMapping + MapViewOfFile. Note `prefetch`
    // is a bool here (on/off), unlike the byte count in the POSIX variant.
    llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) {
        (void) numa;  // no NUMA-specific handling on Windows
        size = file->size;
        HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
        HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
        DWORD error = GetLastError();
        if (hMapping == NULL) {
            throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
        }
        addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
        error = GetLastError();
        // The view keeps the mapping alive; the handle can be closed now.
        CloseHandle(hMapping);
        if (addr == NULL) {
            throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
        }
        if (prefetch) {
            // The PrefetchVirtualMemory API is only present on Windows 8 and above, so we
            // will dynamically load it using GetProcAddress.
            BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
            HMODULE hKernel32;
            // This call is guaranteed to succeed.
            hKernel32 = GetModuleHandleW(L"kernel32.dll");
            // This call may fail if on a pre-Win8 system.
            pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)> (GetProcAddress(hKernel32, "PrefetchVirtualMemory"));
            if (pPrefetchVirtualMemory) {
                // Advise the kernel to preload the mapped memory.
                WIN32_MEMORY_RANGE_ENTRY range;
                range.VirtualAddress = addr;
                range.NumberOfBytes = (SIZE_T)size;
                if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
                    fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
                            llama_format_win_err(GetLastError()).c_str());
                }
            }
        }
    }

    ~llama_mmap() {
        if (!UnmapViewOfFile(addr)) {
            fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n",
                    llama_format_win_err(GetLastError()).c_str());
        }
    }
#else
    static constexpr bool SUPPORTED = false;

    // Stub for platforms without mmap support; always throws.
    llama_mmap(struct llama_file *, bool prefetch = true, bool numa = false) {
        (void) prefetch;
        (void) numa;
        throw std::runtime_error(std::string("mmap not supported"));
    }
#endif
};
// Represents some region of memory being locked using mlock or VirtualLock;
// will automatically unlock on destruction.
struct llama_mlock {
    void * addr = NULL;          // base address of the region being locked
    size_t size = 0;             // number of bytes currently locked
    bool failed_already = false; // once a lock attempt fails, stop retrying

    llama_mlock() {}
    llama_mlock(const llama_mlock &) = delete;  // owns the lock; non-copyable

    ~llama_mlock() {
        if (size) {
            raw_unlock(addr, size);
        }
    }

    // Set the base address; must be called once, before grow_to().
    void init(void * ptr) {
        LLAMA_ASSERT(addr == NULL && size == 0);
        addr = ptr;
    }

    // Extend the locked region to cover at least `target_size` bytes from
    // `addr`, rounded up to the lock granularity. No-op after a failure.
    void grow_to(size_t target_size) {
        LLAMA_ASSERT(addr);
        if (failed_already) {
            return;
        }
        size_t granularity = lock_granularity();
        // round up to a multiple of the granularity (assumed power of two)
        target_size = (target_size + granularity - 1) & ~(granularity - 1);
        if (target_size > size) {
            // lock only the newly-grown tail of the region
            if (raw_lock((uint8_t *) addr + size, target_size - size)) {
                size = target_size;
            } else {
                failed_already = true;
            }
        }
    }

#ifdef _POSIX_MEMLOCK_RANGE
    static constexpr bool SUPPORTED = true;

    size_t lock_granularity() {
        return (size_t) sysconf(_SC_PAGESIZE);
    }

#ifdef __APPLE__
#define MLOCK_SUGGESTION \
    "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
    "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
#else
#define MLOCK_SUGGESTION \
    "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
#endif

    // mlock the range; on failure prints a warning — including a remediation
    // hint when the RLIMIT_MEMLOCK limit looks like the cause — and returns
    // false. Note the `size` parameter shadows the member; the warning uses
    // this->size for the previously-locked total.
    bool raw_lock(const void * addr, size_t size) {
        if (!mlock(addr, size)) {
            return true;
        } else {
            char* errmsg = std::strerror(errno);
            bool suggest = (errno == ENOMEM);
            // Check if the resource limit is fine after all
            struct rlimit lock_limit;
            if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit))
                suggest = false;
            if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size))
                suggest = false;
            fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
                    size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
            return false;
        }
    }
#undef MLOCK_SUGGESTION

    void raw_unlock(void * addr, size_t size) {
        if (munlock(addr, size)) {
            fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
        }
    }
#elif defined(_WIN32)
    static constexpr bool SUPPORTED = true;

    size_t lock_granularity() {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        return (size_t) si.dwPageSize;
    }

    // VirtualLock the range; if the first attempt fails, grow the process
    // working set (which bounds the lockable quota) and retry once.
    bool raw_lock(void * ptr, size_t len) {
        for (int tries = 1; ; tries++) {
            if (VirtualLock(ptr, len)) {
                return true;
            }
            if (tries == 2) {
                // second failure: give up and report
                fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
                        len, size, llama_format_win_err(GetLastError()).c_str());
                return false;
            }
            // It failed but this was only the first try; increase the working
            // set size and try again.
            SIZE_T min_ws_size, max_ws_size;
            if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
                fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
                return false;
            }
            // Per MSDN: "The maximum number of pages that a process can lock
            // is equal to the number of pages in its minimum working set minus
            // a small overhead."
            // Hopefully a megabyte is enough overhead:
            size_t increment = len + 1048576;
            // The minimum must be <= the maximum, so we need to increase both:
            min_ws_size += increment;
            max_ws_size += increment;
            if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
                fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
                return false;
            }
        }
    }

    void raw_unlock(void * ptr, size_t len) {
        if (!VirtualUnlock(ptr, len)) {
            fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
                    llama_format_win_err(GetLastError()).c_str());
        }
    }
#else
    static constexpr bool SUPPORTED = false;

    // Fallback for platforms without memory locking: warn and report failure.
    size_t lock_granularity() {
        return (size_t) 65536;
    }

    bool raw_lock(const void * addr, size_t len) {
        fprintf(stderr, "warning: mlock not supported on this system\n");
        return false;
    }

    void raw_unlock(const void * addr, size_t len) {}
#endif
};
  420. // Replacement for std::vector<uint8_t> that doesn't require zero-initialization.
  421. struct llama_buffer {
  422. uint8_t * addr = NULL;
  423. size_t size = 0;
  424. llama_buffer() = default;
  425. void resize(size_t len) {
  426. #ifdef GGML_USE_METAL
  427. free(addr);
  428. int result = posix_memalign((void **) &addr, getpagesize(), len);
  429. if (result == 0) {
  430. memset(addr, 0, len);
  431. }
  432. else {
  433. addr = NULL;
  434. }
  435. #else
  436. delete[] addr;
  437. addr = new uint8_t[len];
  438. #endif
  439. size = len;
  440. }
  441. ~llama_buffer() {
  442. #ifdef GGML_USE_METAL
  443. free(addr);
  444. #else
  445. delete[] addr;
  446. #endif
  447. addr = NULL;
  448. }
  449. // disable copy and move
  450. llama_buffer(const llama_buffer&) = delete;
  451. llama_buffer(llama_buffer&&) = delete;
  452. llama_buffer& operator=(const llama_buffer&) = delete;
  453. llama_buffer& operator=(llama_buffer&&) = delete;
  454. };
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
// Buffer that prefers pinned (page-locked) CUDA host memory, falling back
// to ordinary pageable heap memory when the pinned allocation fails.
struct llama_ctx_buffer {
    uint8_t * addr = NULL;
    bool is_cuda;   // true when addr came from ggml_cuda_host_malloc
    size_t size = 0;

    llama_ctx_buffer() = default;

    // Release the current allocation and allocate `size` bytes, trying
    // pinned host memory first.
    void resize(size_t size) {
        free();
        addr = (uint8_t *) ggml_cuda_host_malloc(size);
        if (addr) {
            is_cuda = true;
        }
        else {
            // fall back to pageable memory
            addr = new uint8_t[size];
            is_cuda = false;
        }
        this->size = size;
    }

    // Release the allocation with the matching deallocator; safe to call
    // when nothing is allocated (addr == NULL).
    void free() {
        if (addr) {
            if (is_cuda) {
                ggml_cuda_host_free(addr);
            }
            else {
                delete[] addr;
            }
        }
        addr = NULL;
    }

    ~llama_ctx_buffer() {
        free();
    }

    // disable copy and move
    llama_ctx_buffer(const llama_ctx_buffer&) = delete;
    llama_ctx_buffer(llama_ctx_buffer&&) = delete;
    llama_ctx_buffer& operator=(const llama_ctx_buffer&) = delete;
    llama_ctx_buffer& operator=(llama_ctx_buffer&&) = delete;
};
#else
// Without cuBLAS there is no pinned host memory to manage; use the plain buffer.
typedef llama_buffer llama_ctx_buffer;
#endif
  498. #endif