0001-Expose-callable-API-for-server.patch

From 7184ae16e8fd0e9e91cac4c81daa323057fa992b Mon Sep 17 00:00:00 2001
From: Daniel Hiltgen <daniel@ollama.com>
Date: Mon, 13 Nov 2023 12:25:58 -0800
Subject: [PATCH] Expose callable API for server

This adds an extern "C" interface within the example server

---
 examples/server/CMakeLists.txt |  24 +++
 examples/server/server.cpp     | 276 +++++++++++++++++++++++++++++++++
 examples/server/server.h       |  89 +++++++++++
 ggml-cuda.cu                   |   1 +
 4 files changed, 390 insertions(+)
 create mode 100644 examples/server/server.h
diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt
index 859cd12..4ea47a7 100644
--- a/examples/server/CMakeLists.txt
+++ b/examples/server/CMakeLists.txt
@@ -11,3 +11,27 @@ if (WIN32)
     TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
 endif()
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
+
+set(TARGET ext_server)
+option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
+add_library(${TARGET} STATIC server.cpp)
+target_include_directories(${TARGET} PRIVATE ../../common)
+target_include_directories(${TARGET} PRIVATE ../..)
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_definitions(${TARGET} PUBLIC LLAMA_SERVER_LIBRARY=1)
+target_link_libraries(${TARGET} PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT})
+
+if (BUILD_SHARED_LIBS)
+    set_target_properties(ext_server PROPERTIES POSITION_INDEPENDENT_CODE ON)
+    target_compile_definitions(ext_server PRIVATE LLAMA_SHARED LLAMA_BUILD)
+    add_library(ext_server_shared SHARED $<TARGET_OBJECTS:ext_server>)
+    target_link_libraries(ext_server_shared PRIVATE ggml llama llava common ${CMAKE_THREAD_LIBS_INIT})
+    install(TARGETS ext_server_shared LIBRARY)
+endif()
+
+if (CUDAToolkit_FOUND)
+    target_include_directories(${TARGET} PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
+    if (WIN32)
+        target_link_libraries(ext_server_shared PRIVATE nvml)
+    endif()
+endif()
\ No newline at end of file
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 0403853..065420c 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -5,6 +5,9 @@
 #include "../llava/clip.h"
 
 #include "stb_image.h"
+#if defined(LLAMA_SERVER_LIBRARY)
+#include "server.h"
+#endif
 
 #ifndef NDEBUG
 // crash the server in debug mode, otherwise send an http 500 error
@@ -2643,6 +2646,7 @@ static void append_to_generated_text_from_generated_token_probs(llama_server_con
     }
 }
 
+#ifndef LLAMA_SERVER_LIBRARY
 int main(int argc, char **argv)
 {
 #if SERVER_VERBOSE != 1
@@ -3123,3 +3127,275 @@ int main(int argc, char **argv)
     llama_backend_free();
     return 0;
 }
+
+#else // LLAMA_SERVER_LIBRARY
+// Expose the llama server as a callable extern "C" API
+llama_server_context *llama = NULL;
+std::atomic<bool> ext_server_running(false);
+std::thread ext_server_thread;
+
+void llama_server_init(ext_server_params *sparams, ext_server_resp_t *err)
+{
+    assert(err != NULL && sparams != NULL);
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        llama = new llama_server_context;
+        log_set_target(stdout);
+        gpt_params params;
+        params.n_ctx = sparams->n_ctx;
+        params.n_batch = sparams->n_batch;
+        if (sparams->n_threads > 0) {
+            params.n_threads = sparams->n_threads;
+        }
+        params.n_parallel = sparams->n_parallel;
+        params.rope_freq_base = sparams->rope_freq_base;
+        params.rope_freq_scale = sparams->rope_freq_scale;
+
+        if (sparams->memory_f16) {
+            params.cache_type_k = "f16";
+            params.cache_type_v = "f16";
+        } else {
+            params.cache_type_k = "f32";
+            params.cache_type_v = "f32";
+        }
+
+        params.n_gpu_layers = sparams->n_gpu_layers;
+        params.main_gpu = sparams->main_gpu;
+        params.use_mlock = sparams->use_mlock;
+        params.use_mmap = sparams->use_mmap;
+        params.numa = sparams->numa;
+        params.embedding = sparams->embedding;
+        if (sparams->model != NULL) {
+            params.model = sparams->model;
+        }
+
+        for (ext_server_lora_adapter *la = sparams->lora_adapters; la != NULL; la = la->next) {
+            params.lora_adapter.push_back(std::make_tuple(la->adapter, la->scale));
+        }
+
+        if (sparams->mmproj != NULL) {
+            params.mmproj = std::string(sparams->mmproj);
+        }
+
+        llama_backend_init(params.numa);
+
+        // load the model
+        if (!llama->load_model(params))
+        {
+            // TODO - consider modifying the logging logic or patching load_model so we can capture more detailed error messages
+            // and pass them back to the caller for better UX
+            err->id = -1;
+            snprintf(err->msg, err->msg_len, "error loading model %s", params.model.c_str());
+            return;
+        }
+
+        llama->initialize();
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception initializing llama server");
+    }
+}
+
+void llama_server_start()
+{
+    assert(llama != NULL);
+    // TODO mutex to protect thread creation
+    ext_server_thread = std::thread([&]()
+    {
+        ext_server_running = true;
+        try {
+            LOG_TEE("llama server main loop starting\n");
+            ggml_time_init();
+            while (ext_server_running.load())
+            {
+                if (!llama->update_slots()) {
+                    LOG_TEE("unexpected error in llama server update_slots - exiting main loop\n");
+                    break;
+                }
+            }
+        } catch (std::exception &e) {
+            LOG_TEE("caught exception in llama server main loop: %s\n", e.what());
+        } catch (...) {
+            LOG_TEE("caught unknown exception in llama server main loop\n");
+        }
+        LOG_TEE("\nllama server shutting down\n");
+        llama_backend_free();
+    });
+}
+
+void llama_server_stop() {
+    assert(llama != NULL);
+    // TODO - too verbose, remove once things are solid
+    LOG_TEE("requesting llama server shutdown\n");
+    ext_server_running = false;
+    ext_server_thread.join();
+    delete llama;
+    llama = NULL;
+    LOG_TEE("llama server shutdown complete\n");
+}
+
+void llama_server_completion(const char *json_req, ext_server_resp_t *resp) {
+    assert(llama != NULL && json_req != NULL && resp != NULL);
+    resp->id = -1;
+    resp->msg[0] = '\0';
+    try {
+        json data = json::parse(json_req);
+        resp->id = llama->request_completion(data, false, false, -1);
+    } catch (std::exception &e) {
+        snprintf(resp->msg, resp->msg_len, "exception %s", e.what());
+    } catch (...) {
+        snprintf(resp->msg, resp->msg_len, "Unknown exception during completion");
+    }
+}
+
+void llama_server_completion_next_result(const int task_id, ext_server_task_result_t *resp) {
+    assert(llama != NULL && resp != NULL);
+    std::string msg;
+    resp->id = -1;
+    resp->stop = false;
+    resp->error = false;
+    resp->json_resp = NULL;
+    std::string result_json;
+    try {
+        task_result result = llama->next_result(task_id);
+        result_json = result.result_json.dump(-1, ' ', false, json::error_handler_t::replace);
+        resp->id = result.id;
+        resp->stop = result.stop;
+        resp->error = result.error;
+        if (result.error) {
+            llama->request_cancel(task_id);
+        } else if (result.stop) {
+            llama->request_cancel(task_id);
+        }
+    } catch (std::exception &e) {
+        resp->error = true;
+        resp->id = -1;
+        result_json = "{\"error\":\"exception " + std::string(e.what()) + "\"}";
+    } catch (...) {
+        resp->error = true;
+        resp->id = -1;
+        result_json = "{\"error\":\"Unknown exception during completion\"}";
+    }
+    const std::string::size_type size = result_json.size() + 1;
+    resp->json_resp = new char[size];
+    snprintf(resp->json_resp, size, "%s", result_json.c_str());
+}
+
+void llama_server_release_task_result(ext_server_task_result_t *result) {
+    if (result == NULL || result->json_resp == NULL) {
+        return;
+    }
+    delete[] result->json_resp;
+}
+
+void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err) {
+    assert(llama != NULL && err != NULL);
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        llama->request_cancel(task_id);
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception completion cancel in llama server");
+    }
+}
+
+void llama_server_tokenize(const char *json_req, char **json_resp, ext_server_resp_t *err) {
+    assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
+    *json_resp = NULL;
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        const json body = json::parse(json_req);
+        std::vector<llama_token> tokens;
+        if (body.count("content") != 0)
+        {
+            tokens = llama->tokenize(body["content"], false);
+        }
+        const json data = format_tokenizer_response(tokens);
+        std::string result_json = data.dump();
+        const std::string::size_type size = result_json.size() + 1;
+        *json_resp = new char[size];
+        snprintf(*json_resp, size, "%s", result_json.c_str());
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception during tokenize");
+    }
+}
+
+void llama_server_release_json_resp(char **json_resp) {
+    if (json_resp == NULL || *json_resp == NULL) {
+        return;
+    }
+    delete[] *json_resp;
+}
+
+void llama_server_detokenize(const char *json_req, char **json_resp, ext_server_resp_t *err) {
+    assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
+    *json_resp = NULL;
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        const json body = json::parse(json_req);
+        std::string content;
+        if (body.count("tokens") != 0)
+        {
+            const std::vector<llama_token> tokens = body["tokens"];
+            content = tokens_to_str(llama->ctx, tokens.cbegin(), tokens.cend());
+        }
+        const json data = format_detokenized_response(content);
+        std::string result_json = data.dump();
+        const std::string::size_type size = result_json.size() + 1;
+        *json_resp = new char[size];
+        snprintf(*json_resp, size, "%s", result_json.c_str());
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception during detokenize");
+    }
+}
+
+void llama_server_embedding(const char *json_req, char **json_resp, ext_server_resp_t *err) {
+    assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
+    *json_resp = NULL;
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        const json body = json::parse(json_req);
+        json prompt;
+        if (body.count("content") != 0)
+        {
+            prompt = body["content"];
+        }
+        else
+        {
+            prompt = "";
+        }
+        const int task_id = llama->request_completion({ {"prompt", prompt}, { "n_predict", 0} }, false, true, -1);
+        task_result result = llama->next_result(task_id);
+        std::string result_json = result.result_json.dump();
+        const std::string::size_type size = result_json.size() + 1;
+        *json_resp = new char[size];
+        snprintf(*json_resp, size, "%s", result_json.c_str());
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception during embedding");
+    }
+}
+
+#endif // LLAMA_SERVER_LIBRARY
\ No newline at end of file
diff --git a/examples/server/server.h b/examples/server/server.h
new file mode 100644
index 0000000..d22f1b6
--- /dev/null
+++ b/examples/server/server.h
@@ -0,0 +1,89 @@
+#if defined(LLAMA_SERVER_LIBRARY)
+#ifndef LLAMA_SERVER_H
+#define LLAMA_SERVER_H
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+// This exposes extern "C" entrypoints into the llama server.
+// To enable them, compile with LLAMA_SERVER_LIBRARY defined.
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+  typedef struct ext_server_resp {
+    int id;          // < 0 on error
+    size_t msg_len;  // caller must allocate msg and set msg_len
+    char *msg;
+  } ext_server_resp_t;
+
+  // Allocated and freed by caller
+  typedef struct ext_server_lora_adapter {
+    char *adapter;
+    float scale;
+    struct ext_server_lora_adapter *next;
+  } ext_server_lora_adapter_t;
+
+  // Allocated and freed by caller
+  typedef struct ext_server_params
+  {
+    char *model;
+    uint32_t n_ctx;         // text context, 0 = from model
+    uint32_t n_batch;       // prompt processing maximum batch size
+    uint32_t n_threads;     // number of threads to use for generation
+    int32_t n_parallel;     // number of parallel sequences to decode
+    float rope_freq_base;   // RoPE base frequency, 0 = from model
+    float rope_freq_scale;  // RoPE frequency scaling factor, 0 = from model
+    bool memory_f16;        // use f16 instead of f32 for memory kv
+    int32_t n_gpu_layers;   // number of layers to store in VRAM (-1 = use default)
+    int32_t main_gpu;       // the GPU that is used for scratch and small tensors
+    bool use_mlock;         // force system to keep model in RAM
+    bool use_mmap;          // use mmap if possible
+    bool numa;              // attempt optimizations that help on some NUMA systems
+    bool embedding;         // get only sentence embedding
+    ext_server_lora_adapter_t *lora_adapters;
+    char *mmproj;
+  } ext_server_params_t;
+
+  typedef struct ext_server_task_result
+  {
+    int id;
+    bool stop;
+    bool error;
+    char *json_resp;  // null terminated, memory managed by ext_server
+  } ext_server_task_result_t;
+
+  // Initialize the server once per process
+  // err->id = 0 for success and err->msg[0] = '\0'
+  // err->id != 0 for failure, and err->msg contains error message
+  void llama_server_init(ext_server_params_t *sparams, ext_server_resp_t *err);
+
+  // Run the main loop, called once per init
+  void llama_server_start();
+  // Stop the main loop and free up resources allocated in init and start. Init must be called again to reuse
+  void llama_server_stop();
+
+  // json_req null terminated string, memory managed by caller
+  // resp->id >= 0 on success (task ID)
+  // resp->id < 0 on error, and resp->msg contains error message
+  void llama_server_completion(const char *json_req, ext_server_resp_t *resp);
+
+  // Caller must call llama_server_release_task_result to free resp->json_resp
+  void llama_server_completion_next_result(const int task_id, ext_server_task_result_t *result);
+  void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err);
+  void llama_server_release_task_result(ext_server_task_result_t *result);
+
+  // Caller must call llama_server_release_json_resp to free the returned json_resp
+  void llama_server_tokenize(const char *json_req, char **json_resp, ext_server_resp_t *err);
+  void llama_server_detokenize(const char *json_req, char **json_resp, ext_server_resp_t *err);
+  void llama_server_embedding(const char *json_req, char **json_resp, ext_server_resp_t *err);
+  void llama_server_release_json_resp(char **json_resp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+#endif // LLAMA_SERVER_LIBRARY
\ No newline at end of file
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index f20846f..9640cf3 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -6757,6 +6757,7 @@ static cudaError_t ggml_cuda_cpy_tensor_2d(
         CUDA_CHECK(cudaGetDevice(&id));
         src_ptr = (char *) extra->data_device[id];
     } else {
+        fprintf(stderr, "ggml_cuda_cpy_tensor_2d assert: backend: %d\n", src->backend);
         GGML_ASSERT(false);
     }
     char * dst_ptr = (char *) dst;
--
2.39.3 (Apple Git-145)
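
For reference, below is a minimal, hypothetical caller sketch (not part of the patch itself) showing how the extern "C" API declared in examples/server/server.h is intended to be driven: build ext_server_shared as added in CMakeLists.txt, compile the caller with -DLLAMA_SERVER_LIBRARY so the header exposes its declarations, and link against the shared library. The model path and the completion JSON body are placeholders; the request JSON follows the example server's /completion format.

/* Hypothetical caller sketch (not part of the patch).
 * Build: compile with -DLLAMA_SERVER_LIBRARY and link against ext_server_shared. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include "server.h"

int main(void) {
    char msgbuf[512];
    ext_server_resp_t err = {0, sizeof(msgbuf), msgbuf};

    /* Zero the params, then fill in only what this example needs. */
    ext_server_params_t sparams;
    memset(&sparams, 0, sizeof(sparams));
    sparams.model = (char *)"/path/to/model.gguf";  /* placeholder path */
    sparams.n_ctx = 2048;
    sparams.n_batch = 512;
    sparams.n_parallel = 1;
    sparams.n_gpu_layers = -1;  /* -1 = use default */
    sparams.memory_f16 = true;
    sparams.use_mmap = true;

    llama_server_init(&sparams, &err);
    if (err.id != 0) {
        fprintf(stderr, "init failed: %s\n", err.msg);
        return 1;
    }
    llama_server_start();  /* spawns the update_slots() loop on its own thread */

    /* Queue a completion; the request JSON mirrors the example server's /completion body. */
    ext_server_resp_t resp = {0, sizeof(msgbuf), msgbuf};
    llama_server_completion("{\"prompt\": \"Hello\", \"n_predict\": 16}", &resp);
    if (resp.id < 0) {
        fprintf(stderr, "completion failed: %s\n", resp.msg);
    } else {
        /* Drain streamed results until the task reports stop or error. */
        bool done = false;
        while (!done) {
            ext_server_task_result_t result;
            llama_server_completion_next_result(resp.id, &result);
            printf("%s\n", result.json_resp);
            done = result.stop || result.error;
            llama_server_release_task_result(&result);
        }
    }

    llama_server_stop();  /* joins the worker thread and frees the server context */
    return 0;
}

Note that llama_server_init and llama_server_start are split so a model-load failure can be reported through err before the background loop starts, and that buffers returned through json_resp by the tokenize, detokenize, and embedding calls are released with llama_server_release_json_resp.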