ext_server.cpp

#include "ext_server.h"
#include <atomic>

// Necessary evil since the server types are not defined in a header
#include "server.cpp"

// Low level API access to verify GPU access
#if defined(GGML_USE_CUBLAS)
#if defined(GGML_USE_HIPBLAS)
#include <hip/hip_runtime.h>
#include <hipblas/hipblas.h>
#include <hip/hip_fp16.h>
#ifdef __HIP_PLATFORM_AMD__
// for rocblas_initialize()
#include "rocblas/rocblas.h"
#endif // __HIP_PLATFORM_AMD__
#define cudaGetDevice hipGetDevice
#define cudaError_t hipError_t
#define cudaSuccess hipSuccess
#define cudaGetErrorString hipGetErrorString
#else
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuda_fp16.h>
#endif // defined(GGML_USE_HIPBLAS)
#endif // GGML_USE_CUBLAS

// Expose the llama server as a callable extern "C" API
llama_server_context *llama = NULL;
std::thread ext_server_thread;
bool shutting_down = false;
std::atomic_int recv_counter;

// RAII wrapper for tracking in-flight recv calls
class atomicRecv {
 public:
  atomicRecv(std::atomic<int> &atomic) : atomic(atomic) { ++this->atomic; }
  ~atomicRecv() { --this->atomic; }

 private:
  std::atomic<int> &atomic;
};
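
// Initialize the llama server context and load the model described by
// sparams. On failure err->id is set to -1 and err->msg holds a
// human-readable description; on success err->id stays 0.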
void llama_server_init(ext_server_params *sparams, ext_server_resp_t *err) {
  recv_counter = 0;
  assert(err != NULL && sparams != NULL);
  log_set_target(stderr);
  if (sparams->verbose_logging) {
    server_verbose = true;
  } else {
    log_disable();
  }
  LOG_TEE("system info: %s\n", llama_print_system_info());
  err->id = 0;
  err->msg[0] = '\0';
  try {
    llama = new llama_server_context;
    gpt_params params;
    params.n_ctx = sparams->n_ctx;
    params.n_batch = sparams->n_batch;
    if (sparams->n_threads > 0) {
      params.n_threads = sparams->n_threads;
    }
    params.n_parallel = sparams->n_parallel;
    params.rope_freq_base = sparams->rope_freq_base;
    params.rope_freq_scale = sparams->rope_freq_scale;

    if (sparams->memory_f16) {
      params.cache_type_k = "f16";
      params.cache_type_v = "f16";
    } else {
      params.cache_type_k = "f32";
      params.cache_type_v = "f32";
    }

    params.n_gpu_layers = sparams->n_gpu_layers;
    params.main_gpu = sparams->main_gpu;
    params.use_mlock = sparams->use_mlock;
    params.use_mmap = sparams->use_mmap;
    params.numa = sparams->numa;
    params.embedding = sparams->embedding;
    if (sparams->model != NULL) {
      params.model = sparams->model;
    }

    if (sparams->lora_adapters != NULL) {
      for (ext_server_lora_adapter *la = sparams->lora_adapters; la != NULL;
           la = la->next) {
        params.lora_adapter.push_back(std::make_tuple(la->adapter, la->scale));
      }
      params.use_mmap = false;
    }

    if (sparams->mmproj != NULL) {
      params.mmproj = std::string(sparams->mmproj);
    }

#if defined(GGML_USE_CUBLAS)
    // Before attempting to init the backend which will assert on error,
    // verify the CUDA/ROCM GPU is accessible
    LOG_TEE("Performing pre-initialization of GPU\n");
    int id;
    cudaError_t cudaErr = cudaGetDevice(&id);
    if (cudaErr != cudaSuccess) {
      err->id = -1;
      snprintf(err->msg, err->msg_len, "Unable to init GPU: %s",
               cudaGetErrorString(cudaErr));
      return;
    }
#endif

    llama_backend_init(params.numa);

    // load the model
    if (!llama->load_model(params)) {
      // TODO - consider modifying the logging logic or patching load_model so
      // we can capture more detailed error messages and pass them back to the
      // caller for better UX
      err->id = -1;
      snprintf(err->msg, err->msg_len, "error loading model %s",
               params.model.c_str());
      return;
    }

    llama->initialize();
  } catch (std::exception &e) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "exception %s", e.what());
  } catch (...) {
    err->id = -1;
    snprintf(err->msg, err->msg_len,
             "Unknown exception initializing llama server");
  }
}
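
// Start the task-processing loop on a dedicated thread. The thread runs
// until llama_server_stop() terminates the task queue.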
void llama_server_start() {
  assert(llama != NULL);
  // TODO mutex to protect thread creation
  ext_server_thread = std::thread([&]() {
    try {
      LOG_TEE("llama server main loop starting\n");
      ggml_time_init();
      llama->queue_tasks.on_new_task(std::bind(
          &llama_server_context::process_single_task, llama,
          std::placeholders::_1));
      llama->queue_tasks.on_finish_multitask(std::bind(
          &llama_server_context::on_finish_multitask, llama,
          std::placeholders::_1));
      llama->queue_tasks.on_all_tasks_finished(std::bind(
          &llama_server_context::run_on_all_tasks_finished, llama));
      llama->queue_results.on_multitask_update(std::bind(
          &llama_server_queue::update_multitask,
          &llama->queue_tasks,
          std::placeholders::_1,
          std::placeholders::_2,
          std::placeholders::_3));
      llama->queue_tasks.start_loop();
    } catch (std::exception &e) {
      LOG_TEE("caught exception in llama server main loop: %s\n", e.what());
    } catch (...) {
      LOG_TEE("caught unknown exception in llama server main loop\n");
    }
    LOG_TEE("\nllama server shutting down\n");
    llama_backend_free();
  });
}
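
// Stop accepting new requests, wait for in-flight recv calls to drain, then
// terminate the task loop, join the worker thread and free the context.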
void llama_server_stop() {
  assert(llama != NULL);
  // Shutdown any in-flight requests and block incoming requests.
  LOG_TEE("\ninitiating shutdown - draining remaining tasks...\n");
  shutting_down = true;

  while (recv_counter.load() > 0) {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
  }

  // This may take a while for any pending tasks to drain
  // TODO - consider a timeout to cancel tasks if it's taking too long
  llama->queue_tasks.terminate();
  ext_server_thread.join();
  delete llama;
  llama = NULL;
  LOG_TEE("llama server shutdown complete\n");
  shutting_down = false;
}
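
// Queue a completion request parsed from json_req. On success resp->id
// holds the task ID to poll with llama_server_completion_next_result();
// on failure resp->id stays -1 and resp->msg describes the error.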
void llama_server_completion(const char *json_req, ext_server_resp_t *resp) {
  assert(llama != NULL && json_req != NULL && resp != NULL);
  resp->id = -1;
  resp->msg[0] = '\0';
  try {
    if (shutting_down) {
      throw std::runtime_error("server shutting down");
    }
    json data = json::parse(json_req);
    resp->id = llama->queue_tasks.get_new_id();
    llama->queue_results.add_waiting_task_id(resp->id);
    llama->request_completion(resp->id, data, false, false, -1);
  } catch (std::exception &e) {
    snprintf(resp->msg, resp->msg_len, "exception %s", e.what());
  } catch (...) {
    snprintf(resp->msg, resp->msg_len, "Unknown exception during completion");
  }
}
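
// Block until the next result for task_id arrives and copy it into resp.
// resp->json_resp is heap allocated and must be released with
// llama_server_release_task_result().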
void llama_server_completion_next_result(const int task_id,
                                         ext_server_task_result_t *resp) {
  assert(llama != NULL && resp != NULL);
  resp->id = -1;
  resp->stop = false;
  resp->error = false;
  resp->json_resp = NULL;
  std::string result_json;
  try {
    atomicRecv ar(recv_counter);
    task_result result = llama->queue_results.recv(task_id);
    result_json =
        result.result_json.dump(-1, ' ', false, json::error_handler_t::replace);
    resp->id = result.id;
    resp->stop = result.stop;
    resp->error = result.error;
    if (result.error) {
      LOG_TEE("next result cancel on error\n");
      llama->request_cancel(task_id);
      LOG_TEE("next result removing waiting task ID: %d\n", task_id);
      llama->queue_results.remove_waiting_task_id(task_id);
    } else if (result.stop) {
      LOG_TEE("next result cancel on stop\n");
      llama->request_cancel(task_id);
      LOG_TEE("next result removing waiting task ID: %d\n", task_id);
      llama->queue_results.remove_waiting_task_id(task_id);
    } else if (shutting_down) {
      LOG_TEE("aborting completion due to shutdown %d\n", task_id);
      llama->request_cancel(task_id);
      llama->queue_results.remove_waiting_task_id(task_id);
      resp->stop = true;
    }
  } catch (std::exception &e) {
    resp->error = true;
    resp->id = -1;
    result_json = "{\"error\":\"exception " + std::string(e.what()) + "\"}";
    LOG_TEE("llama server completion exception %s\n", e.what());
  } catch (...) {
    resp->error = true;
    resp->id = -1;
    result_json = "{\"error\":\"Unknown exception during completion\"}";
    LOG_TEE("llama server completion unknown exception\n");
  }
  const std::string::size_type size = result_json.size() + 1;
  resp->json_resp = new char[size];
  snprintf(resp->json_resp, size, "%s", result_json.c_str());
}
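
// Free the JSON buffer allocated by llama_server_completion_next_result().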
void llama_server_release_task_result(ext_server_task_result_t *result) {
  if (result == NULL || result->json_resp == NULL) {
    return;
  }
  delete[] result->json_resp;
}
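
// Cancel an in-flight completion task and stop waiting on its results.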
void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err) {
  assert(llama != NULL && err != NULL);
  err->id = 0;
  err->msg[0] = '\0';
  try {
    llama->request_cancel(task_id);
    llama->queue_results.remove_waiting_task_id(task_id);
  } catch (std::exception &e) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "exception %s", e.what());
  } catch (...) {
    err->id = -1;
    snprintf(err->msg, err->msg_len,
             "Unknown exception completion cancel in llama server");
  }
}
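
// Tokenize the "content" field of json_req and return the token IDs as a
// heap-allocated JSON string in *json_resp (release with
// llama_server_release_json_resp()).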
void llama_server_tokenize(const char *json_req, char **json_resp,
                           ext_server_resp_t *err) {
  assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
  *json_resp = NULL;
  err->id = 0;
  err->msg[0] = '\0';
  try {
    if (shutting_down) {
      throw std::runtime_error("server shutting down");
    }
    const json body = json::parse(json_req);
    std::vector<llama_token> tokens;
    if (body.count("content") != 0) {
      tokens = llama->tokenize(body["content"], false);
    }
    const json data = format_tokenizer_response(tokens);
    std::string result_json = data.dump();
    const std::string::size_type size = result_json.size() + 1;
    *json_resp = new char[size];
    snprintf(*json_resp, size, "%s", result_json.c_str());
  } catch (std::exception &e) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "exception %s", e.what());
  } catch (...) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "Unknown exception during tokenize");
  }
}
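
// Free a JSON buffer allocated by the tokenize, detokenize or embedding calls.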
void llama_server_release_json_resp(char **json_resp) {
  if (json_resp == NULL || *json_resp == NULL) {
    return;
  }
  delete[] *json_resp;
}
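
// Convert the "tokens" array of json_req back into text and return it as a
// heap-allocated JSON string in *json_resp.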
void llama_server_detokenize(const char *json_req, char **json_resp,
                             ext_server_resp_t *err) {
  assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
  *json_resp = NULL;
  err->id = 0;
  err->msg[0] = '\0';
  try {
    if (shutting_down) {
      throw std::runtime_error("server shutting down");
    }
    const json body = json::parse(json_req);
    std::string content;
    if (body.count("tokens") != 0) {
      const std::vector<llama_token> tokens = body["tokens"];
      content = tokens_to_str(llama->ctx, tokens.cbegin(), tokens.cend());
    }
    const json data = format_detokenized_response(content);
    std::string result_json = data.dump();
    const std::string::size_type size = result_json.size() + 1;
    *json_resp = new char[size];
    snprintf(*json_resp, size, "%s", result_json.c_str());
  } catch (std::exception &e) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "exception %s", e.what());
  } catch (...) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "Unknown exception during detokenize");
  }
}
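
// Run an embedding-only request (n_predict = 0) for the "content" field of
// json_req and return the result JSON in *json_resp (release with
// llama_server_release_json_resp()).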
void llama_server_embedding(const char *json_req, char **json_resp,
                            ext_server_resp_t *err) {
  assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
  *json_resp = NULL;
  err->id = 0;
  err->msg[0] = '\0';
  try {
    if (shutting_down) {
      throw std::runtime_error("server shutting down");
    }
    const json body = json::parse(json_req);
    json prompt;
    if (body.count("content") != 0) {
      prompt = body["content"];
    } else {
      prompt = "";
    }
    const int task_id = llama->queue_tasks.get_new_id();
    llama->queue_results.add_waiting_task_id(task_id);
    llama->request_completion(task_id, {{"prompt", prompt}, {"n_predict", 0}},
                              false, true, -1);
    atomicRecv ar(recv_counter);
    task_result result = llama->queue_results.recv(task_id);
    std::string result_json = result.result_json.dump();
    const std::string::size_type size = result_json.size() + 1;
    *json_resp = new char[size];
    snprintf(*json_resp, size, "%s", result_json.c_str());
    llama->queue_results.remove_waiting_task_id(task_id);
  } catch (std::exception &e) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "exception %s", e.what());
  } catch (...) {
    err->id = -1;
    snprintf(err->msg, err->msg_len, "Unknown exception during embedding");
  }
}