0001-Expose-callable-API-for-server.patch

From 4c72576c5f6c2217b1ecf7fd8523616acc5526ae Mon Sep 17 00:00:00 2001
From: Daniel Hiltgen <daniel@ollama.com>
Date: Mon, 13 Nov 2023 12:25:58 -0800
Subject: [PATCH] Expose callable API for server

This adds an extern "C" interface within the example server
---
 examples/server/CMakeLists.txt |  24 +++
 examples/server/server.cpp     | 279 +++++++++++++++++++++++++++++++++
 examples/server/server.h       |  89 +++++++++++
 ggml-cuda.cu                   |   1 +
 4 files changed, 393 insertions(+)
 create mode 100644 examples/server/server.h
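
For orientation, here is a minimal caller-side sketch (not part of the patch) of how a host process might drive the new extern "C" API once it is built; the model path and parameter values are placeholders, and the declarations come from examples/server/server.h below.

    // Illustrative sketch only -- not part of this patch.
    // Must be compiled with LLAMA_SERVER_LIBRARY defined so server.h exposes the API.
    #include <stdio.h>
    #include <string.h>
    #include "server.h"

    int main(void) {
        char errbuf[256];
        ext_server_resp_t err = {0, sizeof(errbuf), errbuf}; // caller allocates msg and sets msg_len

        ext_server_params_t sparams;
        memset(&sparams, 0, sizeof(sparams));
        sparams.model      = (char *)"/path/to/model.gguf"; // placeholder path
        sparams.n_ctx      = 2048;
        sparams.n_batch    = 512;
        sparams.n_parallel = 1;
        sparams.memory_f16 = true;
        sparams.use_mmap   = true;

        llama_server_init(&sparams, &err);
        if (err.id != 0) {
            fprintf(stderr, "init failed: %s\n", err.msg);
            return 1;
        }
        llama_server_start(); // runs the update_slots() loop on a background thread
        // ... issue completion / tokenize / embedding requests here ...
        llama_server_stop();  // joins the background thread and frees the context
        return 0;
    }
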
diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt
index 859cd12..4ea47a7 100644
--- a/examples/server/CMakeLists.txt
+++ b/examples/server/CMakeLists.txt
@@ -11,3 +11,27 @@ if (WIN32)
     TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
 endif()
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
+
+set(TARGET ext_server)
+option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
+add_library(${TARGET} STATIC server.cpp)
+target_include_directories(${TARGET} PRIVATE ../../common)
+target_include_directories(${TARGET} PRIVATE ../..)
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_definitions(${TARGET} PUBLIC LLAMA_SERVER_LIBRARY=1)
+target_link_libraries(${TARGET} PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT})
+
+if (BUILD_SHARED_LIBS)
+    set_target_properties(ext_server PROPERTIES POSITION_INDEPENDENT_CODE ON)
+    target_compile_definitions(ext_server PRIVATE LLAMA_SHARED LLAMA_BUILD)
+    add_library(ext_server_shared SHARED $<TARGET_OBJECTS:ext_server>)
+    target_link_libraries(ext_server_shared PRIVATE ggml llama llava common ${CMAKE_THREAD_LIBS_INIT})
+    install(TARGETS ext_server_shared LIBRARY)
+endif()
+
+if (CUDAToolkit_FOUND)
+    target_include_directories(${TARGET} PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
+    if (WIN32)
+        target_link_libraries(ext_server_shared PRIVATE nvml)
+    endif()
+endif()
\ No newline at end of file
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 0403853..5e78e4d 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -5,6 +5,9 @@
 #include "../llava/clip.h"
 #include "stb_image.h"
+#if defined(LLAMA_SERVER_LIBRARY)
+#include "server.h"
+#endif
 #ifndef NDEBUG
 // crash the server in debug mode, otherwise send an http 500 error
@@ -2643,6 +2646,7 @@ static void append_to_generated_text_from_generated_token_probs(llama_server_con
     }
 }
+#ifndef LLAMA_SERVER_LIBRARY
 int main(int argc, char **argv)
 {
 #if SERVER_VERBOSE != 1
@@ -3123,3 +3127,278 @@ int main(int argc, char **argv)
     llama_backend_free();
     return 0;
 }
+
+#else // LLAMA_SERVER_LIBRARY
+// Expose the llama server as a callable extern "C" API
+llama_server_context *llama = NULL;
+std::atomic<bool> ext_server_running(false);
+std::thread ext_server_thread;
+
+void llama_server_init(ext_server_params *sparams, ext_server_resp_t *err)
+{
+#if SERVER_VERBOSE != 1
+    log_disable();
+#endif
+    assert(err != NULL && sparams != NULL);
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        llama = new llama_server_context;
+        log_set_target(stdout);
+        gpt_params params;
+        params.n_ctx = sparams->n_ctx;
+        params.n_batch = sparams->n_batch;
+        if (sparams->n_threads > 0) {
+            params.n_threads = sparams->n_threads;
+        }
+        params.n_parallel = sparams->n_parallel;
+        params.rope_freq_base = sparams->rope_freq_base;
+        params.rope_freq_scale = sparams->rope_freq_scale;
+
+        if (sparams->memory_f16) {
+            params.cache_type_k = "f16";
+            params.cache_type_v = "f16";
+        } else {
+            params.cache_type_k = "f32";
+            params.cache_type_v = "f32";
+        }
+
+        params.n_gpu_layers = sparams->n_gpu_layers;
+        params.main_gpu = sparams->main_gpu;
+        params.use_mlock = sparams->use_mlock;
+        params.use_mmap = sparams->use_mmap;
+        params.numa = sparams->numa;
+        params.embedding = sparams->embedding;
+        if (sparams->model != NULL) {
+            params.model = sparams->model;
+        }
+
+        for (ext_server_lora_adapter *la = sparams->lora_adapters; la != NULL; la = la->next) {
+            params.lora_adapter.push_back(std::make_tuple(la->adapter, la->scale));
+        }
+
+        if (sparams->mmproj != NULL) {
+            params.mmproj = std::string(sparams->mmproj);
+        }
+
+        llama_backend_init(params.numa);
+
+        // load the model
+        if (!llama->load_model(params))
+        {
+            // TODO - consider modifying the logging logic or patching load_model so we can capture more detailed error messages
+            // and pass them back to the caller for better UX
+            err->id = -1;
+            snprintf(err->msg, err->msg_len, "error loading model %s", params.model.c_str());
+            return;
+        }
+
+        llama->initialize();
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception initializing llama server");
+    }
+}
+
+void llama_server_start()
+{
+    assert(llama != NULL);
+    // TODO mutex to protect thread creation
+    ext_server_thread = std::thread([&]()
+    {
+        ext_server_running = true;
+        try {
+            LOG_TEE("llama server main loop starting\n");
+            ggml_time_init();
+            while (ext_server_running.load())
+            {
+                if (!llama->update_slots()) {
+                    LOG_TEE("unexpected error in llama server update_slots - exiting main loop\n");
+                    break;
+                }
+            }
+        } catch (std::exception &e) {
+            LOG_TEE("caught exception in llama server main loop: %s\n", e.what());
+        } catch (...) {
+            LOG_TEE("caught unknown exception in llama server main loop\n");
+        }
+        LOG_TEE("\nllama server shutting down\n");
+        llama_backend_free();
+    });
+}
+
+void llama_server_stop() {
+    assert(llama != NULL);
+    // TODO - too verbose, remove once things are solid
+    LOG_TEE("requesting llama server shutdown\n");
+    ext_server_running = false;
+    ext_server_thread.join();
+    delete llama;
+    llama = NULL;
+    LOG_TEE("llama server shutdown complete\n");
+}
+
+void llama_server_completion(const char *json_req, ext_server_resp_t *resp) {
+    assert(llama != NULL && json_req != NULL && resp != NULL);
+    resp->id = -1;
+    resp->msg[0] = '\0';
+    try {
+        json data = json::parse(json_req);
+        resp->id = llama->request_completion(data, false, false, -1);
+    } catch (std::exception &e) {
+        snprintf(resp->msg, resp->msg_len, "exception %s", e.what());
+    } catch (...) {
+        snprintf(resp->msg, resp->msg_len, "Unknown exception during completion");
+    }
+}
+
+void llama_server_completion_next_result(const int task_id, ext_server_task_result_t *resp) {
+    assert(llama != NULL && resp != NULL);
+    std::string msg;
+    resp->id = -1;
+    resp->stop = false;
+    resp->error = false;
+    resp->json_resp = NULL;
+    std::string result_json;
+    try {
+        task_result result = llama->next_result(task_id);
+        result_json = result.result_json.dump(-1, ' ', false, json::error_handler_t::replace);
+        resp->id = result.id;
+        resp->stop = result.stop;
+        resp->error = result.error;
+        if (result.error) {
+            llama->request_cancel(task_id);
+        } else if (result.stop) {
+            llama->request_cancel(task_id);
+        }
+    } catch (std::exception &e) {
+        resp->error = true;
+        resp->id = -1;
+        result_json = "{\"error\":\"exception " + std::string(e.what()) + "\"}";
+    } catch (...) {
+        resp->error = true;
+        resp->id = -1;
+        result_json = "{\"error\":\"Unknown exception during completion\"}";
+    }
+    const std::string::size_type size = result_json.size() + 1;
+    resp->json_resp = new char[size];
+    snprintf(resp->json_resp, size, "%s", result_json.c_str());
+}
+
+void llama_server_release_task_result(ext_server_task_result_t *result) {
+    if (result == NULL || result->json_resp == NULL) {
+        return;
+    }
+    delete[] result->json_resp;
+}
+
+void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err) {
+    assert(llama != NULL && err != NULL);
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        llama->request_cancel(task_id);
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception completion cancel in llama server");
+    }
+}
+
+void llama_server_tokenize(const char *json_req, char **json_resp, ext_server_resp_t *err) {
+    assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
+    *json_resp = NULL;
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        const json body = json::parse(json_req);
+        std::vector<llama_token> tokens;
+        if (body.count("content") != 0)
+        {
+            tokens = llama->tokenize(body["content"], false);
+        }
+        const json data = format_tokenizer_response(tokens);
+        std::string result_json = data.dump();
+        const std::string::size_type size = result_json.size() + 1;
+        *json_resp = new char[size];
+        snprintf(*json_resp, size, "%s", result_json.c_str());
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception during tokenize");
+    }
+}
+
+void llama_server_release_json_resp(char **json_resp) {
+    if (json_resp == NULL || *json_resp == NULL) {
+        return;
+    }
+    delete[] *json_resp;
+}
+
+void llama_server_detokenize(const char *json_req, char **json_resp, ext_server_resp_t *err) {
+    assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
+    *json_resp = NULL;
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        const json body = json::parse(json_req);
+        std::string content;
+        if (body.count("tokens") != 0)
+        {
+            const std::vector<llama_token> tokens = body["tokens"];
+            content = tokens_to_str(llama->ctx, tokens.cbegin(), tokens.cend());
+        }
+        const json data = format_detokenized_response(content);
+        std::string result_json = data.dump();
+        const std::string::size_type size = result_json.size() + 1;
+        *json_resp = new char[size];
+        snprintf(*json_resp, size, "%s", result_json.c_str());
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception during detokenize");
+    }
+}
+
+void llama_server_embedding(const char *json_req, char** json_resp, ext_server_resp_t *err) {
+    assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
+    *json_resp = NULL;
+    err->id = 0;
+    err->msg[0] = '\0';
+    try {
+        const json body = json::parse(json_req);
+        json prompt;
+        if (body.count("content") != 0)
+        {
+            prompt = body["content"];
+        }
+        else
+        {
+            prompt = "";
+        }
+        const int task_id = llama->request_completion({ {"prompt", prompt}, { "n_predict", 0} }, false, true, -1);
+        task_result result = llama->next_result(task_id);
+        std::string result_json = result.result_json.dump();
+        const std::string::size_type size = result_json.size() + 1;
+        *json_resp = new char[size];
+        snprintf(*json_resp, size, "%s", result_json.c_str());
+    } catch (std::exception &e) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "exception %s", e.what());
+    } catch (...) {
+        err->id = -1;
+        snprintf(err->msg, err->msg_len, "Unknown exception during embedding");
+    }
+}
+
+#endif // LLAMA_SERVER_LIBRARY
\ No newline at end of file
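
The tokenize and detokenize handlers above parse the raw JSON request themselves, keyed on "content" and "tokens" respectively, and hand back an allocated JSON string. A caller-side sketch (illustrative only, not part of the patch; assumes the server was already initialized and started as in the sketch near the top):

    // The request keys ("content", "tokens") mirror what the handlers above parse;
    // prompt text and token ids here are made up for the example.
    #include <stdio.h>
    #include "server.h"

    static void tokenize_roundtrip(void) {
        char errbuf[256];
        ext_server_resp_t err = {0, sizeof(errbuf), errbuf};
        char *json_resp = NULL;

        // tokenize expects {"content": "<text>"}
        llama_server_tokenize("{\"content\": \"hello world\"}", &json_resp, &err);
        if (err.id < 0) {
            fprintf(stderr, "tokenize failed: %s\n", err.msg);
            return;
        }
        printf("tokens: %s\n", json_resp);         // JSON built by format_tokenizer_response
        llama_server_release_json_resp(&json_resp);

        // detokenize expects {"tokens": [<id>, ...]}
        llama_server_detokenize("{\"tokens\": [1, 2, 3]}", &json_resp, &err);
        if (err.id == 0) {
            printf("text: %s\n", json_resp);
            llama_server_release_json_resp(&json_resp);
        }
    }
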
diff --git a/examples/server/server.h b/examples/server/server.h
new file mode 100644
index 0000000..d22f1b6
--- /dev/null
+++ b/examples/server/server.h
@@ -0,0 +1,89 @@
+#if defined(LLAMA_SERVER_LIBRARY)
+#ifndef LLAMA_SERVER_H
+#define LLAMA_SERVER_H
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+// This exposes extern "C" entrypoints into the llama server.
+// To enable them, compile the server with LLAMA_SERVER_LIBRARY defined.
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+    typedef struct ext_server_resp {
+        int id;          // < 0 on error
+        size_t msg_len;  // caller must allocate msg and set msg_len
+        char *msg;
+    } ext_server_resp_t;
+
+    // Allocated and freed by caller
+    typedef struct ext_server_lora_adapter {
+        char *adapter;
+        float scale;
+        struct ext_server_lora_adapter *next;
+    } ext_server_lora_adapter_t;
+
+    // Allocated and freed by caller
+    typedef struct ext_server_params
+    {
+        char *model;
+        uint32_t n_ctx;         // text context, 0 = from model
+        uint32_t n_batch;       // prompt processing maximum batch size
+        uint32_t n_threads;     // number of threads to use for generation
+        int32_t n_parallel;     // number of parallel sequences to decode
+        float rope_freq_base;   // RoPE base frequency, 0 = from model
+        float rope_freq_scale;  // RoPE frequency scaling factor, 0 = from model
+        bool memory_f16;        // use f16 instead of f32 for memory kv
+        int32_t n_gpu_layers;   // number of layers to store in VRAM (-1 - use default)
+        int32_t main_gpu;       // the GPU that is used for scratch and small tensors
+        bool use_mlock;         // force system to keep model in RAM
+        bool use_mmap;          // use mmap if possible
+        bool numa;              // attempt optimizations that help on some NUMA systems
+        bool embedding;         // get only sentence embedding
+        ext_server_lora_adapter_t* lora_adapters;
+        char *mmproj;
+    } ext_server_params_t;
+
+    typedef struct ext_server_task_result
+    {
+        int id;
+        bool stop;
+        bool error;
+        char* json_resp;        // null terminated, memory managed by ext_server
+    } ext_server_task_result_t;
+
+    // Initialize the server once per process
+    // err->id = 0 for success and err->msg[0] = '\0'
+    // err->id != 0 for failure, and err->msg contains the error message
+    void llama_server_init(ext_server_params_t *sparams, ext_server_resp_t *err);
+
+    // Run the main loop, called once per init
+    void llama_server_start();
+    // Stop the main loop and free up resources allocated in init and start. Init must be called again to reuse
+    void llama_server_stop();
+
+    // json_req null terminated string, memory managed by caller
+    // resp->id >= 0 on success (task ID)
+    // resp->id < 0 on error, and resp->msg contains the error message
+    void llama_server_completion(const char *json_req, ext_server_resp_t *resp);
+
+    // Caller must call llama_server_release_task_result to free resp->json_resp
+    void llama_server_completion_next_result(const int task_id, ext_server_task_result_t *result);
+    void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err);
+    void llama_server_release_task_result(ext_server_task_result_t *result);
+
+    // Caller must call llama_server_release_json_resp to free *json_resp once done with it
+    void llama_server_tokenize(const char *json_req, char **json_resp, ext_server_resp_t *err);
+    void llama_server_detokenize(const char *json_req, char **json_resp, ext_server_resp_t *err);
+    void llama_server_embedding(const char *json_req, char** json_resp, ext_server_resp_t *err);
+    void llama_server_release_json_resp(char **json_resp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+#endif // LLAMA_SERVER_LIBRARY
\ No newline at end of file
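
As declared above, llama_server_completion only registers the request and returns a task ID; output is then pulled with llama_server_completion_next_result until a chunk reports stop or error. An illustrative caller loop (not part of the patch; the request fields "prompt" and "n_predict" follow the example server's usual completion JSON, and the exact accepted fields are whatever request_completion parses):

    #include <stdio.h>
    #include "server.h"

    static void run_completion(void) {
        char errbuf[256];
        ext_server_resp_t resp = {0, sizeof(errbuf), errbuf};

        llama_server_completion("{\"prompt\": \"Why is the sky blue?\", \"n_predict\": 128}", &resp);
        if (resp.id < 0) {
            fprintf(stderr, "completion failed: %s\n", resp.msg);
            return;
        }
        const int task_id = resp.id;

        // Pull streamed results until the task reports stop or error.
        for (;;) {
            ext_server_task_result_t result;
            llama_server_completion_next_result(task_id, &result);
            printf("%s\n", result.json_resp);          // one JSON chunk per call
            int done = result.stop || result.error;
            llama_server_release_task_result(&result); // frees result.json_resp
            if (done) break;
        }
    }
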
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index f20846f..9640cf3 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -6757,6 +6757,7 @@ static cudaError_t ggml_cuda_cpy_tensor_2d(
         CUDA_CHECK(cudaGetDevice(&id));
         src_ptr = (char *) extra->data_device[id];
     } else {
+        fprintf(stderr, "ggml_cuda_cpy_tensor_2d assert: backend: %d\n", src->backend);
         GGML_ASSERT(false);
     }
     char * dst_ptr = (char *) dst;
--
2.39.3 (Apple Git-145)