ext_server.cpp

#include "ext_server.h"

// Necessary evil since the server types are not defined in a header
#include "server.cpp"

// Low level API access to verify GPU access
#if defined(GGML_USE_CUBLAS)
#if defined(GGML_USE_HIPBLAS)
#include <hip/hip_runtime.h>
#include <hipblas/hipblas.h>
#include <hip/hip_fp16.h>
#ifdef __HIP_PLATFORM_AMD__
// for rocblas_initialize()
#include "rocblas/rocblas.h"
#endif // __HIP_PLATFORM_AMD__
#define cudaGetDevice hipGetDevice
#define cudaError_t hipError_t
#define cudaSuccess hipSuccess
#define cudaGetErrorString hipGetErrorString
#else
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuda_fp16.h>
#endif // defined(GGML_USE_HIPBLAS)
#endif // GGML_USE_CUBLAS

// Expose the llama server as a callable extern "C" API
llama_server_context *llama = NULL;
std::thread ext_server_thread;
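
// Initialize the shared llama_server_context from the caller-supplied
// parameters. On success err->id stays 0; on failure err->id is set to -1 and
// err->msg describes the problem. Must be called before any other entry point.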
void llama_server_init(ext_server_params *sparams, ext_server_resp_t *err) {
  assert(err != NULL && sparams != NULL);
  log_set_target(stderr);
  if (sparams->verbose_logging) {
    server_verbose = true;
  } else {
    log_disable();
  }
  LOG_TEE("system info: %s\n", llama_print_system_info());
  err->id = 0;
  err->msg[0] = '\0';
  try {
    llama = new llama_server_context;
    gpt_params params;
    params.n_ctx = sparams->n_ctx;
    params.n_batch = sparams->n_batch;
    if (sparams->n_threads > 0) {
      params.n_threads = sparams->n_threads;
    }
    params.n_parallel = sparams->n_parallel;
    params.rope_freq_base = sparams->rope_freq_base;
    params.rope_freq_scale = sparams->rope_freq_scale;
    if (sparams->memory_f16) {
      params.cache_type_k = "f16";
      params.cache_type_v = "f16";
    } else {
      params.cache_type_k = "f32";
      params.cache_type_v = "f32";
    }
    params.n_gpu_layers = sparams->n_gpu_layers;
    params.main_gpu = sparams->main_gpu;
    params.use_mlock = sparams->use_mlock;
    params.use_mmap = sparams->use_mmap;
    params.numa = sparams->numa;
    params.embedding = sparams->embedding;
    if (sparams->model != NULL) {
      params.model = sparams->model;
    }

    if (sparams->lora_adapters != NULL) {
      for (ext_server_lora_adapter *la = sparams->lora_adapters; la != NULL;
           la = la->next) {
        params.lora_adapter.push_back(std::make_tuple(la->adapter, la->scale));
      }
      params.use_mmap = false;
    }

    if (sparams->mmproj != NULL) {
      params.mmproj = std::string(sparams->mmproj);
    }

#if defined(GGML_USE_CUBLAS)
    // Before attempting to init the backend, which will assert on error,
    // verify the CUDA/ROCm GPU is accessible
    LOG_TEE("Performing pre-initialization of GPU\n");
    int id;
    cudaError_t cudaErr = cudaGetDevice(&id);
    if (cudaErr != cudaSuccess) {
      err->id = -1;
      snprintf(err->msg, err->msg_len, "Unable to init GPU: %s",
               cudaGetErrorString(cudaErr));
      return;
    }
#endif

    llama_backend_init(params.numa);

    // load the model
    if (!llama->load_model(params)) {
      // TODO - consider modifying the logging logic or patching load_model so
      // we can capture more detailed error messages and pass them back to the
      // caller for better UX
      err->id = -1;
      snprintf(err->msg, err->msg_len, "error loading model %s",
               params.model.c_str());
      return;
    }

    llama->initialize();
  } catch (std::exception &e) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "exception %s", e.what());
  } catch (...) {
    err->id = -1;
    snprintf(err->msg, err->msg_len,
             "Unknown exception initializing llama server");
  }
}
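
// Launch the task-processing loop on a background thread. Returns
// immediately; the loop runs until llama_server_stop() terminates the queue.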
void llama_server_start() {
  assert(llama != NULL);
  // TODO mutex to protect thread creation
  ext_server_thread = std::thread([&]() {
    try {
      LOG_TEE("llama server main loop starting\n");
      ggml_time_init();
      llama->queue_tasks.on_new_task(std::bind(
          &llama_server_context::process_single_task, llama,
          std::placeholders::_1));
      llama->queue_tasks.on_finish_multitask(std::bind(
          &llama_server_context::on_finish_multitask, llama,
          std::placeholders::_1));
      llama->queue_tasks.on_all_tasks_finished(std::bind(
          &llama_server_context::run_on_all_tasks_finished, llama));
      llama->queue_results.on_multitask_update(std::bind(
          &llama_server_queue::update_multitask, &llama->queue_tasks,
          std::placeholders::_1, std::placeholders::_2,
          std::placeholders::_3));
      llama->queue_tasks.start_loop();
    } catch (std::exception &e) {
      LOG_TEE("caught exception in llama server main loop: %s\n", e.what());
    } catch (...) {
      LOG_TEE("caught unknown exception in llama server main loop\n");
    }
    LOG_TEE("\nllama server shutting down\n");
    llama_backend_free();
  });
}
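
// Drain outstanding tasks, join the background thread, and free the context.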
void llama_server_stop() {
  assert(llama != NULL);
  LOG_TEE("\ninitiating shutdown - draining remaining tasks...\n");
  // This may take a while for any pending tasks to drain
  // TODO - consider a timeout to cancel tasks if it's taking too long
  llama->queue_tasks.terminate();
  ext_server_thread.join();
  delete llama;
  llama = NULL;
  LOG_TEE("llama server shutdown complete\n");
}
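
// Queue a completion request described by json_req. On success resp->id holds
// the task ID to poll with llama_server_completion_next_result(); on failure
// resp->id is -1 and resp->msg is populated.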
void llama_server_completion(const char *json_req, ext_server_resp_t *resp) {
  assert(llama != NULL && json_req != NULL && resp != NULL);
  resp->id = -1;
  resp->msg[0] = '\0';
  try {
    json data = json::parse(json_req);
    resp->id = llama->queue_tasks.get_new_id();
    llama->queue_results.add_waiting_task_id(resp->id);
    llama->request_completion(resp->id, data, false, false, -1);
  } catch (std::exception &e) {
    snprintf(resp->msg, resp->msg_len, "exception %s", e.what());
  } catch (...) {
    snprintf(resp->msg, resp->msg_len, "Unknown exception during completion");
  }
}
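
// Block until the next result for task_id is available. resp->json_resp is
// heap-allocated and must be released with llama_server_release_task_result().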
void llama_server_completion_next_result(const int task_id,
                                         ext_server_task_result_t *resp) {
  assert(llama != NULL && resp != NULL);
  resp->id = -1;
  resp->stop = false;
  resp->error = false;
  resp->json_resp = NULL;
  std::string result_json;
  try {
    task_result result = llama->queue_results.recv(task_id);
    result_json =
        result.result_json.dump(-1, ' ', false, json::error_handler_t::replace);
    resp->id = result.id;
    resp->stop = result.stop;
    resp->error = result.error;
    if (result.error) {
      LOG_TEE("next result cancel on error\n");
      llama->request_cancel(task_id);
      LOG_TEE("next result removing waiting task ID: %d\n", task_id);
      llama->queue_results.remove_waiting_task_id(task_id);
    } else if (result.stop) {
      LOG_TEE("next result cancel on stop\n");
      llama->request_cancel(task_id);
      LOG_TEE("next result removing waiting task ID: %d\n", task_id);
      llama->queue_results.remove_waiting_task_id(task_id);
    }
  } catch (std::exception &e) {
    resp->error = true;
    resp->id = -1;
    result_json = "{\"error\":\"exception " + std::string(e.what()) + "\"}";
    LOG_TEE("llama server completion exception %s\n", e.what());
  } catch (...) {
    resp->error = true;
    resp->id = -1;
    result_json = "{\"error\":\"Unknown exception during completion\"}";
    LOG_TEE("llama server completion unknown exception\n");
  }
  const std::string::size_type size = result_json.size() + 1;
  resp->json_resp = new char[size];
  snprintf(resp->json_resp, size, "%s", result_json.c_str());
}
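
// Free the json_resp buffer allocated by llama_server_completion_next_result().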
void llama_server_release_task_result(ext_server_task_result_t *result) {
  if (result == NULL || result->json_resp == NULL) {
    return;
  }
  delete[] result->json_resp;
}
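
// Cancel an in-flight completion and stop waiting on its results. On failure
// err->id is set to -1 and err->msg is populated.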
void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err) {
  assert(llama != NULL && err != NULL);
  err->id = 0;
  err->msg[0] = '\0';
  try {
    llama->request_cancel(task_id);
    llama->queue_results.remove_waiting_task_id(task_id);
  } catch (std::exception &e) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "exception %s", e.what());
  } catch (...) {
    err->id = -1;
    snprintf(err->msg, err->msg_len,
             "Unknown exception completion cancel in llama server");
  }
}
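
// Tokenize the "content" field of json_req. *json_resp receives a
// heap-allocated JSON string that must be released with
// llama_server_release_json_resp().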
void llama_server_tokenize(const char *json_req, char **json_resp,
                           ext_server_resp_t *err) {
  assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
  *json_resp = NULL;
  err->id = 0;
  err->msg[0] = '\0';
  try {
    const json body = json::parse(json_req);
    std::vector<llama_token> tokens;
    if (body.count("content") != 0) {
      tokens = llama->tokenize(body["content"], false);
    }
    const json data = format_tokenizer_response(tokens);
    std::string result_json = data.dump();
    const std::string::size_type size = result_json.size() + 1;
    *json_resp = new char[size];
    snprintf(*json_resp, size, "%s", result_json.c_str());
  } catch (std::exception &e) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "exception %s", e.what());
  } catch (...) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "Unknown exception during tokenize");
  }
}
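
// Free a buffer returned via *json_resp by the tokenize, detokenize, or
// embedding entry points.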
void llama_server_release_json_resp(char **json_resp) {
  if (json_resp == NULL || *json_resp == NULL) {
    return;
  }
  delete[] *json_resp;
}
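
// Convert the "tokens" array of json_req back into text. The caller owns
// *json_resp and must release it with llama_server_release_json_resp().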
void llama_server_detokenize(const char *json_req, char **json_resp,
                             ext_server_resp_t *err) {
  assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
  *json_resp = NULL;
  err->id = 0;
  err->msg[0] = '\0';
  try {
    const json body = json::parse(json_req);
    std::string content;
    if (body.count("tokens") != 0) {
      const std::vector<llama_token> tokens = body["tokens"];
      content = tokens_to_str(llama->ctx, tokens.cbegin(), tokens.cend());
    }
    const json data = format_detokenized_response(content);
    std::string result_json = data.dump();
    const std::string::size_type size = result_json.size() + 1;
    *json_resp = new char[size];
    snprintf(*json_resp, size, "%s", result_json.c_str());
  } catch (std::exception &e) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "exception %s", e.what());
  } catch (...) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "Unknown exception during detokenize");
  }
}
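
// Compute an embedding for the "content" field of json_req by queuing an
// embedding request through the completion pipeline and waiting for its
// result. The caller owns *json_resp and must release it with
// llama_server_release_json_resp().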
void llama_server_embedding(const char *json_req, char **json_resp,
                            ext_server_resp_t *err) {
  assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
  *json_resp = NULL;
  err->id = 0;
  err->msg[0] = '\0';
  try {
    const json body = json::parse(json_req);
    json prompt;
    if (body.count("content") != 0) {
      prompt = body["content"];
    } else {
      prompt = "";
    }
    const int task_id = llama->queue_tasks.get_new_id();
    llama->queue_results.add_waiting_task_id(task_id);
    llama->request_completion(task_id, {{"prompt", prompt}, {"n_predict", 0}},
                              false, true, -1);
    task_result result = llama->queue_results.recv(task_id);
    std::string result_json = result.result_json.dump();
    const std::string::size_type size = result_json.size() + 1;
    *json_resp = new char[size];
    snprintf(*json_resp, size, "%s", result_json.c_str());
    llama->queue_results.remove_waiting_task_id(task_id);
  } catch (std::exception &e) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "exception %s", e.what());
  } catch (...) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "Unknown exception during embedding");
  }
}
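
// ---------------------------------------------------------------------------
// Illustrative caller flow (sketch only, not compiled as part of this file).
// It assumes the declarations in ext_server.h: ext_server_resp_t exposes the
// id/msg/msg_len fields used above with a caller-owned msg buffer, and
// ext_server_params carries the fields copied into gpt_params in
// llama_server_init(). The concrete values below are hypothetical.
//
//   char errbuf[2048];
//   ext_server_resp_t err;
//   err.id = 0;
//   err.msg = errbuf;
//   err.msg_len = sizeof(errbuf);
//
//   ext_server_params sparams = {};
//   sparams.model = "/path/to/model.gguf";   // hypothetical path
//   sparams.n_ctx = 2048;
//   sparams.n_batch = 512;
//   llama_server_init(&sparams, &err);
//   if (err.id != 0) { /* report err.msg and bail */ }
//   llama_server_start();
//
//   ext_server_resp_t resp;
//   resp.msg = errbuf;
//   resp.msg_len = sizeof(errbuf);
//   llama_server_completion("{\"prompt\": \"hello\"}", &resp);
//
//   bool done = (resp.id < 0);
//   while (!done) {
//     ext_server_task_result_t result;
//     llama_server_completion_next_result(resp.id, &result);
//     /* consume result.json_resp (JSON string) */
//     done = result.stop || result.error;
//     llama_server_release_task_result(&result);
//   }
//
//   llama_server_stop();
// ---------------------------------------------------------------------------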