// MIT License
// Copyright (c) 2023 go-skynet authors
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include "common.h"
#include "llama.h"
#include "binding.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <iostream>
#include <regex>
#include <sstream>
#include <string>
#include <vector>

#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
#include <signal.h>
#include <windows.h>
#endif

#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) ||         \
    defined(_WIN32)
void sigint_handler(int signo) {
  if (signo == SIGINT) {
    _exit(130);
  }
}
#endif
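
// Computes embeddings for params->prompt and writes llama_n_embd(ctx) floats
// into res_embeddings. Returns 0 on success, 1 if evaluation fails or the
// context does not provide embeddings.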
int get_embeddings(void *params_ptr, void *state_pr, float *res_embeddings) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;
  gpt_params params = *params_p;

  if (params.seed <= 0) {
    params.seed = time(NULL);
  }

  std::mt19937 rng(params.seed);

  llama_init_backend(params.numa);

  int n_past = 0;

  // Add a space in front of the first character to match OG llama tokenizer
  // behavior
  params.prompt.insert(0, 1, ' ');

  // tokenize the prompt
  auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);

  // determine newline token
  auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);

  if (embd_inp.size() > 0) {
    if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past,
                   params.n_threads)) {
      fprintf(stderr, "%s : failed to eval\n", __func__);
      return 1;
    }
  }

  const int n_embd = llama_n_embd(ctx);
  const auto embeddings = llama_get_embeddings(ctx);
  if (embeddings == NULL) {
    // the context was not created with embedding support enabled
    fprintf(stderr, "%s : no embeddings available\n", __func__);
    return 1;
  }

  for (int i = 0; i < n_embd; i++) {
    res_embeddings[i] = embeddings[i];
  }

  return 0;
}
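
// Converts a list of token ids back to text, appends it to params->prompt and
// then reuses get_embeddings() on the resulting string.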
int get_token_embeddings(void *params_ptr, void *state_pr, int *tokens,
                         int tokenSize, float *res_embeddings) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;
  gpt_params params = *params_p;

  for (int i = 0; i < tokenSize; i++) {
    auto token_str = llama_token_to_str(ctx, tokens[i]);
    if (token_str == nullptr) {
      continue;
    }
    std::string str_token(token_str); // create a new std::string from the char*
    params_p->prompt += str_token;
  }

  return get_embeddings(params_ptr, state_pr, res_embeddings);
}
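
// Tokenizes `text` and evaluates it in the given context starting at position
// 0. Returns 1 if tokenization fails, otherwise llama_eval's status (0 on
// success).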
int eval(void *params_ptr, void *state_pr, char *text) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;

  auto n_past = 0;
  auto last_n_tokens_data =
      std::vector<llama_token>(params_p->repeat_last_n, 0);

  auto tokens = std::vector<llama_token>(params_p->n_ctx);
  auto n_prompt_tokens =
      llama_tokenize(ctx, text, tokens.data(), tokens.size(), true);

  if (n_prompt_tokens < 1) {
    fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);
    return 1;
  }

  // evaluate prompt
  return llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past,
                    params_p->n_threads);
}
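
// Runs the full text-generation loop for the prompt in params and writes the
// generated text into `result`. Supports prompt-cache (session) files, context
// swapping for long generations, repetition penalties, mirostat and
// top-k/top-p/tfs/typical/temperature sampling, and antiprompt-based stopping.
// Returns 0 on success, 1 on tokenization/evaluation/session errors.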
int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;

  gpt_params params = *params_p;
  const int n_ctx = llama_n_ctx(ctx);

  if (params.seed <= 0) {
    params.seed = time(NULL);
  }

  std::mt19937 rng(params.seed);

  std::string path_session = params.path_prompt_cache;
  std::vector<llama_token> session_tokens;

  if (!path_session.empty()) {
    if (debug) {
      fprintf(stderr, "%s: attempting to load saved session from '%s'\n",
              __func__, path_session.c_str());
    }
    // fopen to check for existing session
    FILE *fp = std::fopen(path_session.c_str(), "rb");
    if (fp != NULL) {
      std::fclose(fp);

      session_tokens.resize(n_ctx);
      size_t n_token_count_out = 0;
      if (!llama_load_session_file(
              ctx, path_session.c_str(), session_tokens.data(),
              session_tokens.capacity(), &n_token_count_out)) {
        fprintf(stderr, "%s: error: failed to load session file '%s'\n",
                __func__, path_session.c_str());
        return 1;
      }
      session_tokens.resize(n_token_count_out);
      llama_set_rng_seed(ctx, params.seed);
      if (debug) {
        fprintf(stderr, "%s: loaded a session with prompt size of %d tokens\n",
                __func__, (int)session_tokens.size());
      }
    } else {
      if (debug) {
        fprintf(stderr, "%s: session file does not exist, will create\n",
                __func__);
      }
    }
  }

  std::vector<llama_token> embd_inp;
  if (!params.prompt.empty() || session_tokens.empty()) {
    // Add a space in front of the first character to match OG llama tokenizer
    // behavior
    params.prompt.insert(0, 1, ' ');

    embd_inp = ::llama_tokenize(ctx, params.prompt, true);
  } else {
    embd_inp = session_tokens;
  }

  // debug message about similarity of saved session, if applicable
  size_t n_matching_session_tokens = 0;
  if (session_tokens.size()) {
    for (llama_token id : session_tokens) {
      if (n_matching_session_tokens >= embd_inp.size() ||
          id != embd_inp[n_matching_session_tokens]) {
        break;
      }
      n_matching_session_tokens++;
    }
    if (debug) {
      if (params.prompt.empty() &&
          n_matching_session_tokens == embd_inp.size()) {
        fprintf(stderr, "%s: using full prompt from session file\n", __func__);
      } else if (n_matching_session_tokens >= embd_inp.size()) {
        fprintf(stderr, "%s: session file has exact match for prompt!\n",
                __func__);
      } else if (n_matching_session_tokens < (embd_inp.size() / 2)) {
        fprintf(stderr,
                "%s: warning: session file has low similarity to prompt (%zu / "
                "%zu tokens); will mostly be reevaluated\n",
                __func__, n_matching_session_tokens, embd_inp.size());
      } else {
        fprintf(stderr, "%s: session file matches %zu / %zu tokens of prompt\n",
                __func__, n_matching_session_tokens, embd_inp.size());
      }
    }
  }

  // if we will use the cache for the full prompt without reaching the end of
  // the cache, force reevaluation of the last token to recalculate the
  // cached logits
  if (!embd_inp.empty() && n_matching_session_tokens == embd_inp.size() &&
      session_tokens.size() > embd_inp.size()) {
    session_tokens.resize(embd_inp.size() - 1);
  }

  // number of tokens to keep when resetting context
  if (params.n_keep < 0 || params.n_keep > (int)embd_inp.size()) {
    params.n_keep = (int)embd_inp.size();
  }

  // determine newline token
  auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);

  // TODO: replace with ring-buffer
  std::vector<llama_token> last_n_tokens(n_ctx);
  std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);

  bool need_to_save_session =
      !path_session.empty() && n_matching_session_tokens < embd_inp.size();

  int n_past = 0;
  int n_remain = params.n_predict;
  int n_consumed = 0;
  int n_session_consumed = 0;

  std::vector<llama_token> embd;
  std::string res = "";

  // do one empty run to warm up the model
  {
    const std::vector<llama_token> tmp = {
        llama_token_bos(),
    };
    llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
    llama_reset_timings(ctx);
  }
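
  // main generation loop: each iteration (1) swaps context when it is full,
  // (2) reuses any matching prefix from a loaded session, (3) evaluates
  // pending tokens in batches of n_batch, (4) either samples a new token or
  // forwards remaining prompt tokens, and (5) checks antiprompts / EOS to stop.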
  while (n_remain != 0) {
    // predict
    if (embd.size() > 0) {
      // infinite text generation via context swapping
      // if we run out of context:
      // - take the n_keep first tokens from the original prompt (via n_past)
      // - take half of the last (n_ctx - n_keep) tokens and recompute the
      //   logits in batches
      if (n_past + (int)embd.size() > n_ctx) {
        const int n_left = n_past - params.n_keep;

        // always keep the first token - BOS
        n_past = std::max(1, params.n_keep);

        // insert n_left/2 tokens at the start of embd from last_n_tokens
        embd.insert(embd.begin(),
                    last_n_tokens.begin() + n_ctx - n_left / 2 - embd.size(),
                    last_n_tokens.end() - embd.size());

        // stop saving session if we run out of context
        path_session.clear();

        // printf("\n---\n");
        // printf("resetting: '");
        // for (int i = 0; i < (int) embd.size(); i++) {
        //   printf("%s", llama_token_to_str(ctx, embd[i]));
        // }
        // printf("'\n");
        // printf("\n---\n");
      }

      // try to reuse a matching prefix from the loaded session instead of
      // re-eval (via n_past)
      if (n_session_consumed < (int)session_tokens.size()) {
        size_t i = 0;
        for (; i < embd.size(); i++) {
          if (embd[i] != session_tokens[n_session_consumed]) {
            session_tokens.resize(n_session_consumed);
            break;
          }

          n_past++;
          n_session_consumed++;

          if (n_session_consumed >= (int)session_tokens.size()) {
            ++i;
            break;
          }
        }
        if (i > 0) {
          embd.erase(embd.begin(), embd.begin() + i);
        }
      }

      // evaluate tokens in batches
      // embd is typically prepared beforehand to fit within a batch, but not
      // always
      for (int i = 0; i < (int)embd.size(); i += params.n_batch) {
        int n_eval = (int)embd.size() - i;
        if (n_eval > params.n_batch) {
          n_eval = params.n_batch;
        }
        if (llama_eval(ctx, &embd[i], n_eval, n_past, params.n_threads)) {
          fprintf(stderr, "%s : failed to eval\n", __func__);
          return 1;
        }
        n_past += n_eval;
      }

      if (embd.size() > 0 && !path_session.empty()) {
        session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
        n_session_consumed = session_tokens.size();
      }
    }

    embd.clear();

    if ((int)embd_inp.size() <= n_consumed) {
      // out of user input, sample next token
      const float temp = params.temp;
      const int32_t top_k =
          params.top_k <= 0 ? llama_n_vocab(ctx) : params.top_k;
      const float top_p = params.top_p;
      const float tfs_z = params.tfs_z;
      const float typical_p = params.typical_p;
      const int32_t repeat_last_n =
          params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
      const float repeat_penalty = params.repeat_penalty;
      const float alpha_presence = params.presence_penalty;
      const float alpha_frequency = params.frequency_penalty;
      const int mirostat = params.mirostat;
      const float mirostat_tau = params.mirostat_tau;
      const float mirostat_eta = params.mirostat_eta;
      const bool penalize_nl = params.penalize_nl;

      // optionally save the session on first sample (for faster prompt loading
      // next time)
      if (!path_session.empty() && need_to_save_session &&
          !params.prompt_cache_ro) {
        need_to_save_session = false;
        llama_save_session_file(ctx, path_session.c_str(),
                                session_tokens.data(), session_tokens.size());
      }
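
      // sampling pipeline: apply logit biases, build the candidate list,
      // apply repetition / frequency / presence penalties, then pick a token
      // either greedily (temp <= 0), via mirostat v1/v2, or via
      // top-k -> tail-free -> typical -> top-p -> temperature sampling.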
      llama_token id = 0;

      {
        auto logits = llama_get_logits(ctx);
        auto n_vocab = llama_n_vocab(ctx);

        // Apply params.logit_bias map
        for (auto it = params.logit_bias.begin(); it != params.logit_bias.end();
             it++) {
          logits[it->first] += it->second;
        }

        std::vector<llama_token_data> candidates;
        candidates.reserve(n_vocab);
        for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
          candidates.emplace_back(
              llama_token_data{token_id, logits[token_id], 0.0f});
        }

        llama_token_data_array candidates_p = {candidates.data(),
                                               candidates.size(), false};

        // Apply penalties
        float nl_logit = logits[llama_token_nl()];
        auto last_n_repeat =
            std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
        llama_sample_repetition_penalty(
            ctx, &candidates_p,
            last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
            last_n_repeat, repeat_penalty);
        llama_sample_frequency_and_presence_penalties(
            ctx, &candidates_p,
            last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
            last_n_repeat, alpha_frequency, alpha_presence);
        if (!penalize_nl) {
          logits[llama_token_nl()] = nl_logit;
        }

        if (temp <= 0) {
          // Greedy sampling
          id = llama_sample_token_greedy(ctx, &candidates_p);
        } else {
          if (mirostat == 1) {
            static float mirostat_mu = 2.0f * mirostat_tau;
            const int mirostat_m = 100;
            llama_sample_temperature(ctx, &candidates_p, temp);
            id = llama_sample_token_mirostat(ctx, &candidates_p, mirostat_tau,
                                             mirostat_eta, mirostat_m,
                                             &mirostat_mu);
          } else if (mirostat == 2) {
            static float mirostat_mu = 2.0f * mirostat_tau;
            llama_sample_temperature(ctx, &candidates_p, temp);
            id = llama_sample_token_mirostat_v2(
                ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
          } else {
            // Temperature sampling
            llama_sample_top_k(ctx, &candidates_p, top_k, 1);
            llama_sample_tail_free(ctx, &candidates_p, tfs_z, 1);
            llama_sample_typical(ctx, &candidates_p, typical_p, 1);
            llama_sample_top_p(ctx, &candidates_p, top_p, 1);
            llama_sample_temperature(ctx, &candidates_p, temp);
            id = llama_sample_token(ctx, &candidates_p);
          }
        }
        // printf("`%d`", candidates_p.size);

        last_n_tokens.erase(last_n_tokens.begin());
        last_n_tokens.push_back(id);
      }

      // add it to the context
      embd.push_back(id);

      // decrement remaining sampling budget
      --n_remain;

      // call the token callback, no need to check if one is actually
      // registered, that will be handled on the Go side.
      auto token_str = llama_token_to_str(ctx, id);
      if (!tokenCallback(state_pr, (char *)token_str)) {
        break;
      }
    } else {
      // some user input remains from prompt or interaction, forward it to
      // processing
      while ((int)embd_inp.size() > n_consumed) {
        embd.push_back(embd_inp[n_consumed]);
        last_n_tokens.erase(last_n_tokens.begin());
        last_n_tokens.push_back(embd_inp[n_consumed]);
        ++n_consumed;
        if ((int)embd.size() >= params.n_batch) {
          break;
        }
      }
    }

    for (auto id : embd) {
      res += llama_token_to_str(ctx, id);
    }

    // check for stop prompt
    if (params.antiprompt.size()) {
      std::string last_output;
      for (auto id : last_n_tokens) {
        last_output += llama_token_to_str(ctx, id);
      }
      // Check if each of the reverse prompts appears at the end of the output.
      for (std::string &antiprompt : params.antiprompt) {
        // size_t extra_padding = params.interactive ? 0 : 2;
        size_t extra_padding = 2;
        size_t search_start_pos =
            last_output.length() >
                    static_cast<size_t>(antiprompt.length() + extra_padding)
                ? last_output.length() -
                      static_cast<size_t>(antiprompt.length() + extra_padding)
                : 0;

        if (last_output.find(antiprompt.c_str(), search_start_pos) !=
            std::string::npos) {
          goto end;
        }
      }
    }

    // end of text token
    if (!embd.empty() && embd.back() == llama_token_eos()) {
      break;
    }
  }

  if (!path_session.empty() && params.prompt_cache_all &&
      !params.prompt_cache_ro) {
    if (debug) {
      fprintf(stderr, "\n%s: saving final output to session file '%s'\n",
              __func__, path_session.c_str());
    }
    llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(),
                            session_tokens.size());
  }

end:
#if defined(_WIN32)
  signal(SIGINT, SIG_DFL);
#endif

  if (debug) {
    llama_print_timings(ctx);
    llama_reset_timings(ctx);
  }

  strcpy(result, res.c_str());
  return 0;
}
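
// Frees the llama_context created by load_model().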
void llama_binding_free_model(void *state_ptr) {
  llama_context *ctx = (llama_context *)state_ptr;
  llama_free(ctx);
}

// Frees a gpt_params struct allocated by llama_allocate_params().
void llama_free_params(void *params_ptr) {
  gpt_params *params = (gpt_params *)params_ptr;
  delete params;
}
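
// Restores a previously saved llama state (rng, logits, embeddings, kv cache)
// from `statefile` into the given context. `modes` is the fopen() mode string
// (typically "rb"). Returns 0 on success, 1 on failure.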
int load_state(void *ctx, char *statefile, char *modes) {
  llama_context *state = (llama_context *)ctx;
  const llama_context *constState = static_cast<const llama_context *>(state);
  const size_t state_size = llama_get_state_size(state);
  uint8_t *state_mem = new uint8_t[state_size];

  {
    FILE *fp_read = fopen(statefile, modes);
    if (fp_read == NULL) {
      fprintf(stderr, "\n%s : failed to open state file '%s'\n", __func__,
              statefile);
      delete[] state_mem;
      return 1;
    }

    if (state_size != llama_get_state_size(constState)) {
      fprintf(stderr, "\n%s : failed to validate state size\n", __func__);
      fclose(fp_read);
      delete[] state_mem;
      return 1;
    }

    const size_t ret = fread(state_mem, 1, state_size, fp_read);
    if (ret != state_size) {
      fprintf(stderr, "\n%s : failed to read state\n", __func__);
      fclose(fp_read);
      delete[] state_mem;
      return 1;
    }

    llama_set_state_data(
        state, state_mem); // could also read directly from memory mapped file
    fclose(fp_read);
  }

  delete[] state_mem;
  return 0;
}
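
// Saves the current llama state (rng, logits, embeddings, kv cache) to `dst`.
// `modes` is the fopen() mode string (typically "wb").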
void save_state(void *ctx, char *dst, char *modes) {
  llama_context *state = (llama_context *)ctx;
  const size_t state_size = llama_get_state_size(state);
  uint8_t *state_mem = new uint8_t[state_size];

  // Save state (rng, logits, embedding and kv_cache) to file
  {
    FILE *fp_write = fopen(dst, modes);
    if (fp_write == NULL) {
      fprintf(stderr, "\n%s : failed to open '%s' for writing\n", __func__,
              dst);
      delete[] state_mem;
      return;
    }
    llama_copy_state_data(
        state, state_mem); // could also copy directly to memory mapped file
    fwrite(state_mem, 1, state_size, fp_write);
    fclose(fp_write);
  }

  delete[] state_mem;
}
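
// Allocates and fills a gpt_params struct from the flat argument list passed
// in by the Go side. The returned pointer must be released with
// llama_free_params(). `logit_bias` is parsed as a token id followed by an
// explicitly signed value (e.g. "15043 +5"); `tensorsplit` is a ','/'/'
// separated list of per-device proportions.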
void *llama_allocate_params(
    const char *prompt, int seed, int threads, int tokens, int top_k,
    float top_p, float temp, float repeat_penalty, int repeat_last_n,
    bool ignore_eos, bool memory_f16, int n_batch, int n_keep,
    const char **antiprompt, int antiprompt_count, float tfs_z, float typical_p,
    float frequency_penalty, float presence_penalty, int mirostat,
    float mirostat_eta, float mirostat_tau, bool penalize_nl,
    const char *logit_bias, bool mlock, bool mmap, const char *maingpu,
    const char *tensorsplit) {
  gpt_params *params = new gpt_params;
  params->seed = seed;
  params->n_threads = threads;
  params->n_predict = tokens;
  params->repeat_last_n = repeat_last_n;
  params->top_k = top_k;
  params->top_p = top_p;
  params->memory_f16 = memory_f16;
  params->temp = temp;
  params->use_mmap = mmap;
  params->use_mlock = mlock;
  params->repeat_penalty = repeat_penalty;
  params->n_batch = n_batch;
  params->n_keep = n_keep;

  if (maingpu[0] != '\0') {
    params->main_gpu = std::stoi(maingpu);
  }

  if (tensorsplit[0] != '\0') {
    std::string arg_next = tensorsplit;
    // split string by , and /
    const std::regex regex{R"([,/]+)"};
    std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
    std::vector<std::string> split_arg{it, {}};
    GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);

    for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) {
      if (i < split_arg.size()) {
        params->tensor_split[i] = std::stof(split_arg[i]);
      } else {
        params->tensor_split[i] = 0.0f;
      }
    }
  }

  if (ignore_eos) {
    params->logit_bias[llama_token_eos()] = -INFINITY;
  }

  for (int i = 0; i < antiprompt_count; i++) {
    params->antiprompt.push_back(antiprompt[i]);
  }

  params->tfs_z = tfs_z;
  params->typical_p = typical_p;
  params->presence_penalty = presence_penalty;
  params->mirostat = mirostat;
  params->mirostat_eta = mirostat_eta;
  params->mirostat_tau = mirostat_tau;
  params->penalize_nl = penalize_nl;

  std::stringstream ss(logit_bias);
  llama_token key;
  char sign;
  std::string value_str;
  if (ss >> key && ss >> sign && std::getline(ss, value_str) &&
      (sign == '+' || sign == '-')) {
    params->logit_bias[key] =
        std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
  }

  params->frequency_penalty = frequency_penalty;
  params->prompt = prompt;
  return params;
}
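
// Loads a model file and creates a llama_context with the given options,
// returned as an opaque pointer (NULL when initialization throws).
//
// Illustrative call sequence for the binding (file name, buffer and parameter
// values below are placeholders, not prescribed by the Go side):
//
//   void *ctx = load_model("model.bin", /*n_ctx=*/512, /*n_seed=*/0,
//                          /*memory_f16=*/true, /*mlock=*/false,
//                          /*embeddings=*/false, /*mmap=*/true,
//                          /*low_vram=*/false, /*vocab_only=*/false,
//                          /*n_gpu_layers=*/0, /*n_batch=*/512,
//                          /*maingpu=*/"", /*tensorsplit=*/"", /*numa=*/false);
//   void *params = llama_allocate_params("Hello", ...);
//   char result[4096];
//   llama_predict(params, ctx, result, /*debug=*/false);
//   llama_free_params(params);
//   llama_binding_free_model(ctx);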
void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16,
                 bool mlock, bool embeddings, bool mmap, bool low_vram,
                 bool vocab_only, int n_gpu_layers, int n_batch,
                 const char *maingpu, const char *tensorsplit, bool numa) {
  // load the model
  auto lparams = llama_context_default_params();

  lparams.n_ctx = n_ctx;
  lparams.seed = n_seed;
  lparams.f16_kv = memory_f16;
  lparams.embedding = embeddings;
  lparams.use_mlock = mlock;
  lparams.n_gpu_layers = n_gpu_layers;
  lparams.use_mmap = mmap;
  lparams.low_vram = low_vram;
  lparams.vocab_only = vocab_only;

  if (maingpu[0] != '\0') {
    lparams.main_gpu = std::stoi(maingpu);
  }

  if (tensorsplit[0] != '\0') {
    std::string arg_next = tensorsplit;
    // split string by , and /
    const std::regex regex{R"([,/]+)"};
    std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
    std::vector<std::string> split_arg{it, {}};
    GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);

    for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) {
      if (i < split_arg.size()) {
        lparams.tensor_split[i] = std::stof(split_arg[i]);
      } else {
        lparams.tensor_split[i] = 0.0f;
      }
    }
  }

  lparams.n_batch = n_batch;

  llama_init_backend(numa);
  void *res = nullptr;
  try {
    res = llama_init_from_file(fname, lparams);
  } catch (std::runtime_error &e) {
    fprintf(stderr, "failed %s", e.what());
    return res;
  }

  return res;
}