0001-Expose-callable-API-for-server.patch

From 90c332fe2ef61149b38561d02836e66715df214d Mon Sep 17 00:00:00 2001
From: Daniel Hiltgen <daniel@ollama.com>
Date: Mon, 13 Nov 2023 12:25:58 -0800
Subject: [PATCH] Expose callable API for server

This adds an extern "C" interface within the example server
---
 examples/server/CMakeLists.txt |  27 ++++
 examples/server/server.cpp     | 280 +++++++++++++++++++++++++++++++++
 examples/server/server.h       |  89 +++++++++++
 ggml-cuda.cu                   |   1 +
 4 files changed, 397 insertions(+)
 create mode 100644 examples/server/server.h
diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt
index 859cd12..da2b9bf 100644
--- a/examples/server/CMakeLists.txt
+++ b/examples/server/CMakeLists.txt
@@ -11,3 +11,30 @@ if (WIN32)
     TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
 endif()
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
+
+set(TARGET ext_server)
+option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
+add_library(${TARGET} STATIC server.cpp)
+target_include_directories(${TARGET} PRIVATE ../../common)
+target_include_directories(${TARGET} PRIVATE ../..)
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_definitions(${TARGET} PUBLIC LLAMA_SERVER_LIBRARY=1)
+target_link_libraries(${TARGET} PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT})
+target_compile_definitions(${TARGET} PRIVATE
+    SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
+)
+
+if (BUILD_SHARED_LIBS)
+    set_target_properties(ext_server PROPERTIES POSITION_INDEPENDENT_CODE ON)
+    target_compile_definitions(ext_server PRIVATE LLAMA_SHARED LLAMA_BUILD)
+    add_library(ext_server_shared SHARED $<TARGET_OBJECTS:ext_server>)
+    target_link_libraries(ext_server_shared PRIVATE ggml llama llava common ${CMAKE_THREAD_LIBS_INIT})
+    install(TARGETS ext_server_shared LIBRARY)
+endif()
+
+if (CUDAToolkit_FOUND)
+    target_include_directories(${TARGET} PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
+    if (WIN32)
+        target_link_libraries(ext_server_shared PRIVATE nvml)
+    endif()
+endif()
\ No newline at end of file
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 0403853..07fb05c 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -5,6 +5,9 @@
 #include "../llava/clip.h"
 #include "stb_image.h"
+#if defined(LLAMA_SERVER_LIBRARY)
+#include "server.h"
+#endif
 #ifndef NDEBUG
 // crash the server in debug mode, otherwise send an http 500 error
@@ -2643,6 +2646,7 @@ static void append_to_generated_text_from_generated_token_probs(llama_server_con
     }
 }
+#ifndef LLAMA_SERVER_LIBRARY
 int main(int argc, char **argv)
 {
 #if SERVER_VERBOSE != 1
@@ -3123,3 +3127,279 @@ int main(int argc, char **argv)
     llama_backend_free();
     return 0;
 }
+
+#else // LLAMA_SERVER_LIBRARY
+// Expose the llama server as a callable extern "C" API
+llama_server_context *llama = NULL;
+std::atomic<bool> ext_server_running(false);
+std::thread ext_server_thread;
+
+void llama_server_init(ext_server_params *sparams, ext_server_resp_t *err)
+{
+#if SERVER_VERBOSE != 1
+    LOG_TEE("disabling verbose llm logging\n");
+    log_disable();
+#endif
+    assert(err != NULL && sparams != NULL);
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        llama = new llama_server_context;
+        log_set_target(stdout);
+        gpt_params params;
+        params.n_ctx = sparams->n_ctx;
+        params.n_batch = sparams->n_batch;
+        if (sparams->n_threads > 0) {
+            params.n_threads = sparams->n_threads;
+        }
+        params.n_parallel = sparams->n_parallel;
+        params.rope_freq_base = sparams->rope_freq_base;
+        params.rope_freq_scale = sparams->rope_freq_scale;
+
+        if (sparams->memory_f16) {
+            params.cache_type_k = "f16";
+            params.cache_type_v = "f16";
+        } else {
+            params.cache_type_k = "f32";
+            params.cache_type_v = "f32";
+        }
+
+        params.n_gpu_layers = sparams->n_gpu_layers;
+        params.main_gpu = sparams->main_gpu;
+        params.use_mlock = sparams->use_mlock;
+        params.use_mmap = sparams->use_mmap;
+        params.numa = sparams->numa;
+        params.embedding = sparams->embedding;
+        if (sparams->model != NULL) {
+            params.model = sparams->model;
+        }
+
+        for (ext_server_lora_adapter *la = sparams->lora_adapters; la != NULL; la = la->next) {
+            params.lora_adapter.push_back(std::make_tuple(la->adapter, la->scale));
+        }
+
+        if (sparams->mmproj != NULL) {
+            params.mmproj = std::string(sparams->mmproj);
+        }
+
+        llama_backend_init(params.numa);
+
+        // load the model
+        if (!llama->load_model(params))
+        {
+            // TODO - consider modifying the logging logic or patching load_model so we can capture more detailed error messages
+            // and pass them back to the caller for better UX
+            err->id = -1;
+            snprintf(err->msg, err->msg_len, "error loading model %s", params.model.c_str());
+            return;
+        }
+
+        llama->initialize();
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception initializing llama server");
+    }
+}
+
+void llama_server_start()
+{
+    assert(llama != NULL);
+    // TODO mutex to protect thread creation
+    ext_server_thread = std::thread([&]()
+    {
+        ext_server_running = true;
+        try {
+            LOG_TEE("llama server main loop starting\n");
+            ggml_time_init();
+            while (ext_server_running.load())
+            {
+                if (!llama->update_slots()) {
+                    LOG_TEE("unexpected error in llama server update_slots - exiting main loop\n");
+                    break;
+                }
+            }
+        } catch (std::exception &e) {
+            LOG_TEE("caught exception in llama server main loop: %s\n", e.what());
+        } catch (...) {
+            LOG_TEE("caught unknown exception in llama server main loop\n");
+        }
+        LOG_TEE("\nllama server shutting down\n");
+        llama_backend_free();
+    });
+}
+
+void llama_server_stop() {
+    assert(llama != NULL);
+    // TODO - too verbose, remove once things are solid
+    LOG_TEE("requesting llama server shutdown\n");
+    ext_server_running = false;
+    ext_server_thread.join();
+    delete llama;
+    llama = NULL;
+    LOG_TEE("llama server shutdown complete\n");
+}
+
+void llama_server_completion(const char *json_req, ext_server_resp_t *resp) {
+    assert(llama != NULL && json_req != NULL && resp != NULL);
+    resp->id = -1;
+    resp->msg[0] = '\0';
+    try {
+        json data = json::parse(json_req);
+        resp->id = llama->request_completion(data, false, false, -1);
+    } catch (std::exception &e) {
+        snprintf(resp->msg, resp->msg_len, "exception %s", e.what());
+    } catch (...) {
+        snprintf(resp->msg, resp->msg_len, "Unknown exception during completion");
+    }
+}
+
+void llama_server_completion_next_result(const int task_id, ext_server_task_result_t *resp) {
+    assert(llama != NULL && resp != NULL);
+    std::string msg;
+    resp->id = -1;
+    resp->stop = false;
+    resp->error = false;
+    resp->json_resp = NULL;
+    std::string result_json;
+    try {
+        task_result result = llama->next_result(task_id);
+        result_json = result.result_json.dump(-1, ' ', false, json::error_handler_t::replace);
+        resp->id = result.id;
+        resp->stop = result.stop;
+        resp->error = result.error;
+        if (result.error) {
+            llama->request_cancel(task_id);
+        } else if (result.stop) {
+            llama->request_cancel(task_id);
+        }
+    } catch (std::exception &e) {
+        resp->error = true;
+        resp->id = -1;
+        result_json = "{\"error\":\"exception " + std::string(e.what()) + "\"}";
+    } catch (...) {
+        resp->error = true;
+        resp->id = -1;
+        result_json = "{\"error\":\"Unknown exception during completion\"}";
+    }
+    const std::string::size_type size = result_json.size() + 1;
+    resp->json_resp = new char[size];
+    snprintf(resp->json_resp, size, "%s", result_json.c_str());
+}
+
+void llama_server_release_task_result(ext_server_task_result_t *result) {
+    if (result == NULL || result->json_resp == NULL) {
+        return;
+    }
+    delete[] result->json_resp;
+}
+
+void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err) {
+    assert(llama != NULL && err != NULL);
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        llama->request_cancel(task_id);
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception completion cancel in llama server");
+    }
+}
+
+void llama_server_tokenize(const char *json_req, char **json_resp, ext_server_resp_t *err) {
+    assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
+    *json_resp = NULL;
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        const json body = json::parse(json_req);
+        std::vector<llama_token> tokens;
+        if (body.count("content") != 0)
+        {
+            tokens = llama->tokenize(body["content"], false);
+        }
+        const json data = format_tokenizer_response(tokens);
+        std::string result_json = data.dump();
+        const std::string::size_type size = result_json.size() + 1;
+        *json_resp = new char[size];
+        snprintf(*json_resp, size, "%s", result_json.c_str());
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception during tokenize");
+    }
+}
+
+void llama_server_release_json_resp(char **json_resp) {
+    if (json_resp == NULL || *json_resp == NULL) {
+        return;
+    }
+    delete[] *json_resp;
+}
+
+void llama_server_detokenize(const char *json_req, char **json_resp, ext_server_resp_t *err) {
+    assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
+    *json_resp = NULL;
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        const json body = json::parse(json_req);
+        std::string content;
+        if (body.count("tokens") != 0)
+        {
+            const std::vector<llama_token> tokens = body["tokens"];
+            content = tokens_to_str(llama->ctx, tokens.cbegin(), tokens.cend());
+        }
+        const json data = format_detokenized_response(content);
+        std::string result_json = data.dump();
+        const std::string::size_type size = result_json.size() + 1;
+        *json_resp = new char[size];
+        snprintf(*json_resp, size, "%s", result_json.c_str());
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception during detokenize");
+    }
+}
+
+void llama_server_embedding(const char *json_req, char **json_resp, ext_server_resp_t *err) {
+    assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
+    *json_resp = NULL;
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        const json body = json::parse(json_req);
+        json prompt;
+        if (body.count("content") != 0)
+        {
+            prompt = body["content"];
+        }
+        else
+        {
+            prompt = "";
+        }
+        const int task_id = llama->request_completion({ {"prompt", prompt}, { "n_predict", 0} }, false, true, -1);
+        task_result result = llama->next_result(task_id);
+        std::string result_json = result.result_json.dump();
+        const std::string::size_type size = result_json.size() + 1;
+        *json_resp = new char[size];
+        snprintf(*json_resp, size, "%s", result_json.c_str());
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception during embedding");
+    }
+}
+
+#endif // LLAMA_SERVER_LIBRARY
\ No newline at end of file
diff --git a/examples/server/server.h b/examples/server/server.h
new file mode 100644
index 0000000..d22f1b6
--- /dev/null
+++ b/examples/server/server.h
@@ -0,0 +1,89 @@
+#if defined(LLAMA_SERVER_LIBRARY)
+#ifndef LLAMA_SERVER_H
+#define LLAMA_SERVER_H
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+// This exposes extern C entrypoints into the llama_server
+// To enable the server compile with LLAMA_SERVER_LIBRARY
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+    typedef struct ext_server_resp {
+        int id;          // < 0 on error
+        size_t msg_len;  // caller must allocate msg and set msg_len
+        char *msg;
+    } ext_server_resp_t;
+
+    // Allocated and freed by caller
+    typedef struct ext_server_lora_adapter {
+        char *adapter;
+        float scale;
+        struct ext_server_lora_adapter *next;
+    } ext_server_lora_adapter_t;
+
+    // Allocated and freed by caller
+    typedef struct ext_server_params
+    {
+        char *model;
+        uint32_t n_ctx;          // text context, 0 = from model
+        uint32_t n_batch;        // prompt processing maximum batch size
+        uint32_t n_threads;      // number of threads to use for generation
+        int32_t n_parallel;      // number of parallel sequences to decode
+        float rope_freq_base;    // RoPE base frequency, 0 = from model
+        float rope_freq_scale;   // RoPE frequency scaling factor, 0 = from model
+        bool memory_f16;         // use f16 instead of f32 for memory kv
+        int32_t n_gpu_layers;    // number of layers to store in VRAM (-1 - use default)
+        int32_t main_gpu;        // the GPU that is used for scratch and small tensors
+        bool use_mlock;          // force system to keep model in RAM
+        bool use_mmap;           // use mmap if possible
+        bool numa;               // attempt optimizations that help on some NUMA systems
+        bool embedding;          // get only sentence embedding
+        ext_server_lora_adapter_t *lora_adapters;
+        char *mmproj;
+    } ext_server_params_t;
+
+    typedef struct ext_server_task_result
+    {
+        int id;
+        bool stop;
+        bool error;
+        char *json_resp; // null terminated, memory managed by ext_server
+    } ext_server_task_result_t;
+
+    // Initialize the server once per process
+    // err->id = 0 for success, and err->msg[0] = '\0'
+    // err->id != 0 for failure, and err->msg contains the error message
+    void llama_server_init(ext_server_params_t *sparams, ext_server_resp_t *err);
+
+    // Run the main loop, called once per init
+    void llama_server_start();
+    // Stop the main loop and free up resources allocated in init and start. Init must be called again to reuse
+    void llama_server_stop();
+
+    // json_req null terminated string, memory managed by caller
+    // resp->id >= 0 on success (task ID)
+    // resp->id < 0 on error, and resp->msg contains error message
+    void llama_server_completion(const char *json_req, ext_server_resp_t *resp);
+
+    // Caller must call llama_server_release_task_result to free resp->json_resp
+    void llama_server_completion_next_result(const int task_id, ext_server_task_result_t *result);
+    void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err);
+    void llama_server_release_task_result(ext_server_task_result_t *result);
+
+    // Caller must call llama_server_release_json_resp to free *json_resp when the call succeeds (err.id == 0)
+    void llama_server_tokenize(const char *json_req, char **json_resp, ext_server_resp_t *err);
+    void llama_server_detokenize(const char *json_req, char **json_resp, ext_server_resp_t *err);
+    void llama_server_embedding(const char *json_req, char **json_resp, ext_server_resp_t *err);
+    void llama_server_release_json_resp(char **json_resp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+#endif // LLAMA_SERVER_LIBRARY
\ No newline at end of file
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index f20846f..9640cf3 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -6757,6 +6757,7 @@ static cudaError_t ggml_cuda_cpy_tensor_2d(
         CUDA_CHECK(cudaGetDevice(&id));
         src_ptr = (char *) extra->data_device[id];
     } else {
+        fprintf(stderr, "ggml_cuda_cpy_tensor_2d assert: backend: %d\n", src->backend);
         GGML_ASSERT(false);
     }
     char * dst_ptr = (char *) dst;
--
2.39.3 (Apple Git-145)
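
Usage sketch (not part of the patch): a minimal, hypothetical host program driving the
extern "C" API declared in server.h above. The caller allocates err.msg and sets msg_len,
initializes once per process, starts the main loop, submits a completion request as JSON,
polls results until stop or error, then shuts down. The model path and parameter values are
illustrative assumptions; the "prompt"/"n_predict" keys mirror the request_completion call
used by llama_server_embedding in this patch.

    #include <cstdio>
    #include <cstring>
    #include "server.h" // examples/server/server.h; compile with -DLLAMA_SERVER_LIBRARY=1

    int main() {
        char errbuf[256];
        ext_server_resp_t err;
        err.id = 0;
        err.msg_len = sizeof(errbuf); // caller must allocate msg and set msg_len
        err.msg = errbuf;

        ext_server_params_t sparams;
        memset(&sparams, 0, sizeof(sparams));            // leaves lora_adapters/mmproj NULL
        sparams.model = (char *)"/path/to/model.gguf";   // hypothetical model path
        sparams.n_ctx = 2048;
        sparams.n_batch = 512;
        sparams.n_parallel = 1;
        sparams.n_gpu_layers = -1;                       // -1 = use default
        sparams.memory_f16 = true;
        sparams.use_mmap = true;

        llama_server_init(&sparams, &err);
        if (err.id != 0) {
            fprintf(stderr, "init failed: %s\n", err.msg);
            return 1;
        }
        llama_server_start();                            // spawns the update_slots loop

        ext_server_resp_t resp = err;                    // reuse the caller-owned msg buffer
        llama_server_completion("{\"prompt\": \"Hello\", \"n_predict\": 16}", &resp);
        if (resp.id < 0) {
            fprintf(stderr, "completion failed: %s\n", resp.msg);
        } else {
            ext_server_task_result_t result;
            do {                                         // poll until the task stops or errors
                llama_server_completion_next_result(resp.id, &result);
                printf("%s\n", result.json_resp);        // one JSON result per call
                bool done = result.stop || result.error;
                llama_server_release_task_result(&result); // frees result.json_resp
                if (done) break;
            } while (true);
        }

        llama_server_stop();                             // joins the worker thread, frees the context
        return 0;
    }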