ext_server.cpp

#include "ext_server.h"

// Necessary evil since the server types are not defined in a header
#include "server.cpp"

// Expose the llama server as a callable extern "C" API
llama_server_context *llama = NULL;
std::atomic<bool> ext_server_running(false);
std::thread ext_server_thread;
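
// Initialize the llama server: translate the C-style ext_server_params into
// gpt_params, set up the backend, and load the model. On failure, err->id is
// set to -1 and err->msg carries a human-readable message.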
void llama_server_init(ext_server_params *sparams, ext_server_resp_t *err) {
#if SERVER_VERBOSE != 1
  log_disable();
#endif
  LOG_TEE("system info: %s", llama_print_system_info());
  assert(err != NULL && sparams != NULL);
  err->id = 0;
  err->msg[0] = '\0';
  try {
    llama = new llama_server_context;
    log_set_target(stdout);
    gpt_params params;
    params.n_ctx = sparams->n_ctx;
    params.n_batch = sparams->n_batch;
    if (sparams->n_threads > 0) {
      params.n_threads = sparams->n_threads;
    }
    params.n_parallel = sparams->n_parallel;
    params.rope_freq_base = sparams->rope_freq_base;
    params.rope_freq_scale = sparams->rope_freq_scale;

    if (sparams->memory_f16) {
      params.cache_type_k = "f16";
      params.cache_type_v = "f16";
    } else {
      params.cache_type_k = "f32";
      params.cache_type_v = "f32";
    }

    params.n_gpu_layers = sparams->n_gpu_layers;
    params.main_gpu = sparams->main_gpu;
    params.use_mlock = sparams->use_mlock;
    params.use_mmap = sparams->use_mmap;
    params.numa = sparams->numa;
    params.embedding = sparams->embedding;
    if (sparams->model != NULL) {
      params.model = sparams->model;
    }

    if (sparams->lora_adapters != NULL) {
      for (ext_server_lora_adapter *la = sparams->lora_adapters; la != NULL;
           la = la->next) {
        params.lora_adapter.push_back(std::make_tuple(la->adapter, la->scale));
      }
      params.use_mmap = false;
    }

    if (sparams->mmproj != NULL) {
      params.mmproj = std::string(sparams->mmproj);
    }

    llama_backend_init(params.numa);

    // load the model
    if (!llama->load_model(params)) {
      // TODO - consider modifying the logging logic or patching load_model so
      // we can capture more detailed error messages and pass them back to the
      // caller for better UX
      err->id = -1;
      snprintf(err->msg, err->msg_len, "error loading model %s",
               params.model.c_str());
      return;
    }

    llama->initialize();
  } catch (std::exception &e) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "exception %s", e.what());
  } catch (...) {
    err->id = -1;
    snprintf(err->msg, err->msg_len,
             "Unknown exception initializing llama server");
  }
}
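
// Spawn the background worker thread that drives the server's main loop,
// repeatedly calling update_slots() until ext_server_running is cleared or an
// unrecoverable error occurs.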
void llama_server_start() {
  assert(llama != NULL);
  // TODO mutex to protect thread creation
  ext_server_thread = std::thread([&]() {
    ext_server_running = true;
    try {
      LOG_TEE("llama server main loop starting\n");
      ggml_time_init();
      while (ext_server_running.load()) {
        if (!llama->update_slots()) {
          LOG_TEE(
              "unexpected error in llama server update_slots - exiting main "
              "loop\n");
          break;
        }
      }
    } catch (std::exception &e) {
      LOG_TEE("caught exception in llama server main loop: %s\n", e.what());
    } catch (...) {
      LOG_TEE("caught unknown exception in llama server main loop\n");
    }
    LOG_TEE("\nllama server shutting down\n");
    llama_backend_free();
  });
}
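
// Signal the main loop to exit, wait for the worker thread to join, and tear
// down the server context.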
void llama_server_stop() {
  assert(llama != NULL);
  // TODO - too verbose, remove once things are solid
  LOG_TEE("requesting llama server shutdown\n");
  ext_server_running = false;

  // unblocks the update_slots() loop so it can clean up and exit
  llama->request_cancel(0);

  ext_server_thread.join();
  delete llama;
  llama = NULL;
  LOG_TEE("llama server shutdown complete\n");
}
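
// Submit a completion request described by a JSON string. On success resp->id
// holds the task id to poll with llama_server_completion_next_result(); on
// failure it stays -1 and resp->msg describes the error.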
void llama_server_completion(const char *json_req, ext_server_resp_t *resp) {
  assert(llama != NULL && json_req != NULL && resp != NULL);
  resp->id = -1;
  resp->msg[0] = '\0';
  try {
    json data = json::parse(json_req);
    resp->id = llama->request_completion(data, false, false, -1);
  } catch (std::exception &e) {
    snprintf(resp->msg, resp->msg_len, "exception %s", e.what());
  } catch (...) {
    snprintf(resp->msg, resp->msg_len, "Unknown exception during completion");
  }
}
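
// Fetch the next available result for a running completion task. The result
// JSON is copied into a freshly allocated buffer that the caller must free
// with llama_server_release_task_result().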
void llama_server_completion_next_result(const int task_id,
                                         ext_server_task_result_t *resp) {
  assert(llama != NULL && resp != NULL);
  std::string msg;
  resp->id = -1;
  resp->stop = false;
  resp->error = false;
  resp->json_resp = NULL;
  std::string result_json;
  try {
    task_result result = llama->next_result(task_id);
    result_json =
        result.result_json.dump(-1, ' ', false, json::error_handler_t::replace);
    resp->id = result.id;
    resp->stop = result.stop;
    resp->error = result.error;
    if (result.error) {
      llama->request_cancel(task_id);
    } else if (result.stop) {
      llama->request_cancel(task_id);
    }
  } catch (std::exception &e) {
    resp->error = true;
    resp->id = -1;
    result_json = "{\"error\":\"exception " + std::string(e.what()) + "\"}";
    LOG_TEE("llama server completion exception %s\n", e.what());
  } catch (...) {
    resp->error = true;
    resp->id = -1;
    result_json = "{\"error\":\"Unknown exception during completion\"}";
    LOG_TEE("llama server completion unknown exception\n");
  }
  const std::string::size_type size = result_json.size() + 1;
  resp->json_resp = new char[size];
  snprintf(resp->json_resp, size, "%s", result_json.c_str());
}
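
// Free the JSON buffer allocated by llama_server_completion_next_result().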
void llama_server_release_task_result(ext_server_task_result_t *result) {
  if (result == NULL || result->json_resp == NULL) {
    return;
  }
  delete[] result->json_resp;
}
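
// Cancel an in-flight completion task by id.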
void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err) {
  assert(llama != NULL && err != NULL);
  err->id = 0;
  err->msg[0] = '\0';
  try {
    llama->request_cancel(task_id);
  } catch (std::exception &e) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "exception %s", e.what());
  } catch (...) {
    err->id = -1;
    snprintf(err->msg, err->msg_len,
             "Unknown exception completion cancel in llama server");
  }
}
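
// Tokenize the "content" field of the JSON request and return the token list
// as a JSON string. The caller owns *json_resp and must release it with
// llama_server_release_json_resp().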
void llama_server_tokenize(const char *json_req, char **json_resp,
                           ext_server_resp_t *err) {
  assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
  *json_resp = NULL;
  err->id = 0;
  err->msg[0] = '\0';
  try {
    const json body = json::parse(json_req);
    std::vector<llama_token> tokens;
    if (body.count("content") != 0) {
      tokens = llama->tokenize(body["content"], false);
    }
    const json data = format_tokenizer_response(tokens);
    std::string result_json = data.dump();
    const std::string::size_type size = result_json.size() + 1;
    *json_resp = new char[size];
    snprintf(*json_resp, size, "%s", result_json.c_str());
  } catch (std::exception &e) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "exception %s", e.what());
  } catch (...) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "Unknown exception during tokenize");
  }
}
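
// Free a JSON response buffer returned by the tokenize, detokenize, or
// embedding calls.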
void llama_server_release_json_resp(char **json_resp) {
  if (json_resp == NULL || *json_resp == NULL) {
    return;
  }
  delete[] *json_resp;
}
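
// Convert the "tokens" array of the JSON request back into text and return it
// as a JSON string. The caller owns *json_resp.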
void llama_server_detokenize(const char *json_req, char **json_resp,
                             ext_server_resp_t *err) {
  assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
  *json_resp = NULL;
  err->id = 0;
  err->msg[0] = '\0';
  try {
    const json body = json::parse(json_req);
    std::string content;
    if (body.count("tokens") != 0) {
      const std::vector<llama_token> tokens = body["tokens"];
      content = tokens_to_str(llama->ctx, tokens.cbegin(), tokens.cend());
    }
    const json data = format_detokenized_response(content);
    std::string result_json = data.dump();
    const std::string::size_type size = result_json.size() + 1;
    *json_resp = new char[size];
    snprintf(*json_resp, size, "%s", result_json.c_str());
  } catch (std::exception &e) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "exception %s", e.what());
  } catch (...) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "Unknown exception during detokenize");
  }
}
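
// Compute an embedding for the "content" field of the JSON request by issuing
// a completion request with n_predict set to 0 and returning its result JSON.
// The caller owns *json_resp.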
void llama_server_embedding(const char *json_req, char **json_resp,
                            ext_server_resp_t *err) {
  assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
  *json_resp = NULL;
  err->id = 0;
  err->msg[0] = '\0';
  try {
    const json body = json::parse(json_req);
    json prompt;
    if (body.count("content") != 0) {
      prompt = body["content"];
    } else {
      prompt = "";
    }
    const int task_id = llama->request_completion(
        {{"prompt", prompt}, {"n_predict", 0}}, false, true, -1);
    task_result result = llama->next_result(task_id);
    std::string result_json = result.result_json.dump();
    const std::string::size_type size = result_json.size() + 1;
    *json_resp = new char[size];
    snprintf(*json_resp, size, "%s", result_json.c_str());
  } catch (std::exception &e) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "exception %s", e.what());
  } catch (...) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "Unknown exception during embedding");
  }
}