binding.cpp
// MIT License
// Copyright (c) 2023 go-skynet authors
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include "common.h"
#include "llama.h"

#include "binding.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iostream>
#include <random>
#include <regex>
#include <sstream>
#include <string>
#include <vector>

#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
#include <signal.h>
#include <windows.h>
#endif
#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) ||          \
    defined(_WIN32)
void sigint_handler(int signo) {
  if (signo == SIGINT) {
    _exit(130);
  }
}
#endif
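
// Tokenizes `text` and evaluates it in the given context. `p` is a
// gpt_params* and `c` a llama_context*, passed in as opaque pointers from the
// Go side. Returns 1 if tokenization fails, otherwise the result of
// llama_eval (0 on success).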
int eval(void *p, void *c, char *text) {
  gpt_params *params = (gpt_params *)p;
  llama_context *ctx = (llama_context *)c;

  auto n_past = 0;
  auto last_n_tokens_data = std::vector<llama_token>(params->repeat_last_n, 0);

  auto tokens = std::vector<llama_token>(params->n_ctx);
  auto n_prompt_tokens =
      llama_tokenize(ctx, text, tokens.data(), tokens.size(), true);
  if (n_prompt_tokens < 1) {
    fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);
    return 1;
  }
  return llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past,
                    params->n_threads);
}
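
// Runs the full prediction loop for the prompt configured in `p` (a
// gpt_params*) against the context `c` (a llama_context*), optionally
// loading and saving a prompt-cache session file. The generated text is
// copied into `result`, which the caller must size large enough to hold it.
// Returns 0 on success, 1 on error.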
int llama_predict(void *p, void *c, char *result, bool debug) {
  gpt_params *params = (gpt_params *)p;
  llama_context *ctx = (llama_context *)c;

  const int n_ctx = llama_n_ctx(ctx);

  if (params->seed <= 0) {
    params->seed = time(NULL);
  }
  std::mt19937 rng(params->seed);

  std::string path_session = params->path_prompt_cache;
  std::vector<llama_token> session_tokens;
  if (!path_session.empty()) {
    if (debug) {
      fprintf(stderr, "%s: attempting to load saved session from '%s'\n",
              __func__, path_session.c_str());
    }
    // fopen to check for existing session
    FILE *fp = std::fopen(path_session.c_str(), "rb");
    if (fp != NULL) {
      std::fclose(fp);

      session_tokens.resize(n_ctx);
      size_t n_token_count_out = 0;
      if (!llama_load_session_file(
              ctx, path_session.c_str(), session_tokens.data(),
              session_tokens.capacity(), &n_token_count_out)) {
        fprintf(stderr, "%s: error: failed to load session file '%s'\n",
                __func__, path_session.c_str());
        return 1;
      }
      session_tokens.resize(n_token_count_out);
      llama_set_rng_seed(ctx, params->seed);
      if (debug) {
        fprintf(stderr, "%s: loaded a session with prompt size of %d tokens\n",
                __func__, (int)session_tokens.size());
      }
    } else {
      if (debug) {
        fprintf(stderr, "%s: session file does not exist, will create\n",
                __func__);
      }
    }
  }
  std::vector<llama_token> embd_inp;
  if (!params->prompt.empty() || session_tokens.empty()) {
    // Add a space in front of the first character to match OG llama tokenizer
    // behavior
    params->prompt.insert(0, 1, ' ');
    embd_inp = ::llama_tokenize(ctx, params->prompt, true);
  } else {
    embd_inp = session_tokens;
  }
  // debug message about similarity of saved session, if applicable
  size_t n_matching_session_tokens = 0;
  if (session_tokens.size()) {
    for (llama_token id : session_tokens) {
      if (n_matching_session_tokens >= embd_inp.size() ||
          id != embd_inp[n_matching_session_tokens]) {
        break;
      }
      n_matching_session_tokens++;
    }
    if (debug) {
      if (params->prompt.empty() &&
          n_matching_session_tokens == embd_inp.size()) {
        fprintf(stderr, "%s: using full prompt from session file\n", __func__);
      } else if (n_matching_session_tokens >= embd_inp.size()) {
        fprintf(stderr, "%s: session file has exact match for prompt!\n",
                __func__);
      } else if (n_matching_session_tokens < (embd_inp.size() / 2)) {
        fprintf(stderr,
                "%s: warning: session file has low similarity to prompt (%zu / "
                "%zu tokens); will mostly be reevaluated\n",
                __func__, n_matching_session_tokens, embd_inp.size());
      } else {
        fprintf(stderr, "%s: session file matches %zu / %zu tokens of prompt\n",
                __func__, n_matching_session_tokens, embd_inp.size());
      }
    }
  }
  // if we will use the cache for the full prompt without reaching the end of
  // the cache, force reevaluation of the last token to recalculate the
  // cached logits
  if (!embd_inp.empty() && n_matching_session_tokens == embd_inp.size() &&
      session_tokens.size() > embd_inp.size()) {
    session_tokens.resize(embd_inp.size() - 1);
  }

  // number of tokens to keep when resetting context
  if (params->n_keep < 0 || params->n_keep > (int)embd_inp.size()) {
    params->n_keep = (int)embd_inp.size();
  }
  // determine newline token
  auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);

  // TODO: replace with ring-buffer
  std::vector<llama_token> last_n_tokens(n_ctx);
  std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);

  bool need_to_save_session =
      !path_session.empty() && n_matching_session_tokens < embd_inp.size();

  int n_past = 0;
  int n_remain = params->n_predict;
  int n_consumed = 0;
  int n_session_consumed = 0;

  std::vector<llama_token> embd;
  std::string res = "";

  // do one empty run to warm up the model
  {
    const std::vector<llama_token> tmp = {
        llama_token_bos(),
    };
    llama_eval(ctx, tmp.data(), tmp.size(), 0, params->n_threads);
    llama_reset_timings(ctx);
  }
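
  // main generation loop: each iteration either evaluates pending tokens
  // (prompt or previously sampled), samples the next token, or feeds more of
  // the prompt into the batch, until the sampling budget runs out or a stop
  // condition is hit.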
  while (n_remain != 0) {
    // predict
    if (embd.size() > 0) {
      // infinite text generation via context swapping
      // if we run out of context:
      // - take the n_keep first tokens from the original prompt (via n_past)
      // - take half of the last (n_ctx - n_keep) tokens and recompute the
      //   logits in batches
      if (n_past + (int)embd.size() > n_ctx) {
        const int n_left = n_past - params->n_keep;

        // always keep the first token - BOS
        n_past = std::max(1, params->n_keep);

        // insert n_left/2 tokens at the start of embd from last_n_tokens
        embd.insert(embd.begin(),
                    last_n_tokens.begin() + n_ctx - n_left / 2 - embd.size(),
                    last_n_tokens.end() - embd.size());

        // stop saving session if we run out of context
        path_session.clear();
      }

      // try to reuse a matching prefix from the loaded session instead of
      // re-eval (via n_past)
      if (n_session_consumed < (int)session_tokens.size()) {
        size_t i = 0;
        for (; i < embd.size(); i++) {
          if (embd[i] != session_tokens[n_session_consumed]) {
            session_tokens.resize(n_session_consumed);
            break;
          }

          n_past++;
          n_session_consumed++;

          if (n_session_consumed >= (int)session_tokens.size()) {
            ++i;
            break;
          }
        }
        if (i > 0) {
          embd.erase(embd.begin(), embd.begin() + i);
        }
      }

      // evaluate tokens in batches
      // embd is typically prepared beforehand to fit within a batch, but not
      // always
      for (int i = 0; i < (int)embd.size(); i += params->n_batch) {
        int n_eval = (int)embd.size() - i;
        if (n_eval > params->n_batch) {
          n_eval = params->n_batch;
        }
        if (llama_eval(ctx, &embd[i], n_eval, n_past, params->n_threads)) {
          fprintf(stderr, "%s : failed to eval\n", __func__);
          return 1;
        }
        n_past += n_eval;
      }

      if (embd.size() > 0 && !path_session.empty()) {
        session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
        n_session_consumed = session_tokens.size();
      }
    }
    embd.clear();

    if ((int)embd_inp.size() <= n_consumed) {
      // out of user input, sample next token
      const float temp = params->temp;
      const int32_t top_k =
          params->top_k <= 0 ? llama_n_vocab(ctx) : params->top_k;
      const float top_p = params->top_p;
      const float tfs_z = params->tfs_z;
      const float typical_p = params->typical_p;
      const int32_t repeat_last_n =
          params->repeat_last_n < 0 ? n_ctx : params->repeat_last_n;
      const float repeat_penalty = params->repeat_penalty;
      const float alpha_presence = params->presence_penalty;
      const float alpha_frequency = params->frequency_penalty;
      const int mirostat = params->mirostat;
      const float mirostat_tau = params->mirostat_tau;
      const float mirostat_eta = params->mirostat_eta;
      const bool penalize_nl = params->penalize_nl;

      // optionally save the session on first sample (for faster prompt loading
      // next time)
      if (!path_session.empty() && need_to_save_session &&
          !params->prompt_cache_ro) {
        need_to_save_session = false;
        llama_save_session_file(ctx, path_session.c_str(),
                                session_tokens.data(), session_tokens.size());
      }
      llama_token id = 0;

      {
        auto logits = llama_get_logits(ctx);
        auto n_vocab = llama_n_vocab(ctx);

        // Apply params.logit_bias map
        for (auto it = params->logit_bias.begin();
             it != params->logit_bias.end(); it++) {
          logits[it->first] += it->second;
        }

        std::vector<llama_token_data> candidates;
        candidates.reserve(n_vocab);
        for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
          candidates.emplace_back(
              llama_token_data{token_id, logits[token_id], 0.0f});
        }

        llama_token_data_array candidates_p = {candidates.data(),
                                               candidates.size(), false};

        // Apply penalties
        float nl_logit = logits[llama_token_nl()];
        auto last_n_repeat =
            std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
        llama_sample_repetition_penalty(
            ctx, &candidates_p,
            last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
            last_n_repeat, repeat_penalty);
        llama_sample_frequency_and_presence_penalties(
            ctx, &candidates_p,
            last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
            last_n_repeat, alpha_frequency, alpha_presence);
        if (!penalize_nl) {
          logits[llama_token_nl()] = nl_logit;
        }

        if (temp <= 0) {
          // Greedy sampling
          id = llama_sample_token_greedy(ctx, &candidates_p);
        } else {
          if (mirostat == 1) {
            static float mirostat_mu = 2.0f * mirostat_tau;
            const int mirostat_m = 100;
            llama_sample_temperature(ctx, &candidates_p, temp);
            id = llama_sample_token_mirostat(ctx, &candidates_p, mirostat_tau,
                                             mirostat_eta, mirostat_m,
                                             &mirostat_mu);
          } else if (mirostat == 2) {
            static float mirostat_mu = 2.0f * mirostat_tau;
            llama_sample_temperature(ctx, &candidates_p, temp);
            id = llama_sample_token_mirostat_v2(
                ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
          } else {
            // Temperature sampling
            llama_sample_top_k(ctx, &candidates_p, top_k, 1);
            llama_sample_tail_free(ctx, &candidates_p, tfs_z, 1);
            llama_sample_typical(ctx, &candidates_p, typical_p, 1);
            llama_sample_top_p(ctx, &candidates_p, top_p, 1);
            llama_sample_temperature(ctx, &candidates_p, temp);
            id = llama_sample_token(ctx, &candidates_p);
          }
        }

        last_n_tokens.erase(last_n_tokens.begin());
        last_n_tokens.push_back(id);
      }
      // add it to the context
      embd.push_back(id);

      // decrement remaining sampling budget
      --n_remain;

      // call the token callback, no need to check if one is actually
      // registered, that will be handled on the Go side.
      auto token_str = llama_token_to_str(ctx, id);
      if (!tokenCallback(ctx, (char *)token_str)) {
        break;
      }
    } else {
      // some user input remains from prompt or interaction, forward it to
      // processing
      while ((int)embd_inp.size() > n_consumed) {
        embd.push_back(embd_inp[n_consumed]);
        last_n_tokens.erase(last_n_tokens.begin());
        last_n_tokens.push_back(embd_inp[n_consumed]);
        ++n_consumed;
        if ((int)embd.size() >= params->n_batch) {
          break;
        }
      }
    }
    for (auto id : embd) {
      res += llama_token_to_str(ctx, id);
    }

    // check for stop prompt
    if (params->antiprompt.size()) {
      std::string last_output;
      for (auto id : last_n_tokens) {
        last_output += llama_token_to_str(ctx, id);
      }
      // Check if each of the reverse prompts appears at the end of the output.
      for (std::string &antiprompt : params->antiprompt) {
        // size_t extra_padding = params.interactive ? 0 : 2;
        size_t extra_padding = 2;
        size_t search_start_pos =
            last_output.length() >
                    static_cast<size_t>(antiprompt.length() + extra_padding)
                ? last_output.length() -
                      static_cast<size_t>(antiprompt.length() + extra_padding)
                : 0;

        if (last_output.find(antiprompt.c_str(), search_start_pos) !=
            std::string::npos) {
          goto end;
        }
      }
    }

    // end of text token
    if (!embd.empty() && embd.back() == llama_token_eos()) {
      break;
    }
  }
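
  // optionally persist the whole session (prompt plus generated tokens) so a
  // later run with the same prompt-cache file can skip re-evaluation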
  if (!path_session.empty() && params->prompt_cache_all &&
      !params->prompt_cache_ro) {
    if (debug) {
      fprintf(stderr, "\n%s: saving final output to session file '%s'\n",
              __func__, path_session.c_str());
    }
    llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(),
                            session_tokens.size());
  }

end:
#if defined(_WIN32)
  signal(SIGINT, SIG_DFL);
#endif

  if (debug) {
    llama_print_timings(ctx);
    llama_reset_timings(ctx);
  }

  strcpy(result, res.c_str());
  return 0;
}
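
// Cleanup helpers exposed to the Go side: llama_binding_free_model releases
// the llama_context returned by load_model, and llama_free_params deletes the
// gpt_params allocated by llama_allocate_params.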
void llama_binding_free_model(void *ctx) { llama_free((llama_context *)ctx); }

void llama_free_params(void *params) { delete (gpt_params *)params; }
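
// Builds a gpt_params instance from the flat argument list passed over cgo
// and returns it as an opaque pointer. The caller owns the result and should
// release it with llama_free_params once prediction is done.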
void *llama_allocate_params(
    const char *prompt, int seed, int threads, int tokens, int top_k,
    float top_p, float temp, float repeat_penalty, int repeat_last_n,
    bool ignore_eos, bool memory_f16, int n_batch, int n_keep,
    const char **antiprompt, int antiprompt_count, float tfs_z, float typical_p,
    float frequency_penalty, float presence_penalty, int mirostat,
    float mirostat_eta, float mirostat_tau, bool penalize_nl,
    const char *logit_bias, const char *session_file, bool prompt_cache_all,
    bool mlock, bool mmap, const char *maingpu, const char *tensorsplit,
    bool prompt_cache_ro) {
  gpt_params *params = new gpt_params;
  params->seed = seed;
  params->n_threads = threads;
  params->n_predict = tokens;
  params->repeat_last_n = repeat_last_n;
  params->prompt_cache_ro = prompt_cache_ro;
  params->top_k = top_k;
  params->top_p = top_p;
  params->memory_f16 = memory_f16;
  params->temp = temp;
  params->use_mmap = mmap;
  params->use_mlock = mlock;
  params->repeat_penalty = repeat_penalty;
  params->n_batch = n_batch;
  params->n_keep = n_keep;

  if (maingpu[0] != '\0') {
    params->main_gpu = std::stoi(maingpu);
  }

  if (tensorsplit[0] != '\0') {
    std::string arg_next = tensorsplit;
    // split string by , and /
    const std::regex regex{R"([,/]+)"};
    std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
    std::vector<std::string> split_arg{it, {}};
    GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);

    for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) {
      if (i < split_arg.size()) {
        params->tensor_split[i] = std::stof(split_arg[i]);
      } else {
        params->tensor_split[i] = 0.0f;
      }
    }
  }

  params->prompt_cache_all = prompt_cache_all;
  params->path_prompt_cache = session_file;

  if (ignore_eos) {
    params->logit_bias[llama_token_eos()] = -INFINITY;
  }

  if (antiprompt_count > 0) {
    for (int i = 0; i < antiprompt_count; i++) {
      params->antiprompt.push_back(std::string(antiprompt[i]));
    }
  }

  params->tfs_z = tfs_z;
  params->typical_p = typical_p;
  params->presence_penalty = presence_penalty;
  params->mirostat = mirostat;
  params->mirostat_eta = mirostat_eta;
  params->mirostat_tau = mirostat_tau;
  params->penalize_nl = penalize_nl;

  std::stringstream ss(logit_bias);
  llama_token key;
  char sign;
  std::string value_str;
  if (ss >> key && ss >> sign && std::getline(ss, value_str) &&
      (sign == '+' || sign == '-')) {
    params->logit_bias[key] =
        std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
  }

  params->frequency_penalty = frequency_penalty;
  params->prompt = prompt;

  return params;
}
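
// Loads a model file and returns a new llama_context for it as an opaque
// pointer, or nullptr on failure. Context size, seed, mmap/mlock, GPU
// off-loading and tensor-split options are forwarded to llama.cpp via
// llama_context_default_params.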
void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16,
                 bool mlock, bool embeddings, bool mmap, bool low_vram,
                 bool vocab_only, int n_gpu_layers, int n_batch,
                 const char *maingpu, const char *tensorsplit, bool numa) {
  auto lparams = llama_context_default_params();

  lparams.n_ctx = n_ctx;
  lparams.seed = n_seed;
  lparams.f16_kv = memory_f16;
  lparams.embedding = embeddings;
  lparams.use_mlock = mlock;
  lparams.n_gpu_layers = n_gpu_layers;
  lparams.use_mmap = mmap;
  lparams.low_vram = low_vram;
  lparams.vocab_only = vocab_only;

  if (maingpu[0] != '\0') {
    lparams.main_gpu = std::stoi(maingpu);
  }

  if (tensorsplit[0] != '\0') {
    std::string arg_next = tensorsplit;
    // split string by , and /
    const std::regex regex{R"([,/]+)"};
    std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
    std::vector<std::string> split_arg{it, {}};
    GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);

    for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) {
      if (i < split_arg.size()) {
        lparams.tensor_split[i] = std::stof(split_arg[i]);
      } else {
        lparams.tensor_split[i] = 0.0f;
      }
    }
  }

  lparams.n_batch = n_batch;

  llama_init_backend(numa);

  struct llama_model *model = llama_load_model_from_file(fname, lparams);
  if (!model) {
    return nullptr;
  }

  return llama_new_context_with_model(model, lparams);
}
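
// Illustrative call sequence (a minimal sketch, not part of the bindings; the
// model path, buffer size, and the elided sampling arguments below are
// placeholders, and `tokenCallback` is expected to be provided by the Go
// side):
//
//   void *ctx = load_model("model.bin", /*n_ctx=*/512, /*n_seed=*/-1,
//                          /*memory_f16=*/true, /*mlock=*/false,
//                          /*embeddings=*/false, /*mmap=*/true,
//                          /*low_vram=*/false, /*vocab_only=*/false,
//                          /*n_gpu_layers=*/0, /*n_batch=*/512,
//                          /*maingpu=*/"", /*tensorsplit=*/"", /*numa=*/false);
//   void *params = llama_allocate_params("Hello", /*seed=*/-1, /*threads=*/4,
//                                        /*tokens=*/128, ... /* remaining
//                                        sampling and cache options */);
//   char result[8192];
//   if (ctx && params &&
//       llama_predict(params, ctx, result, /*debug=*/false) == 0) {
//     printf("%s\n", result);
//   }
//   llama_free_params(params);
//   llama_binding_free_model(ctx);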