0001-Expose-callable-API-for-server.patch

From 64b3fbb150d12b3ca63ac2fb4e57bc46f41d2ccd Mon Sep 17 00:00:00 2001
From: Daniel Hiltgen <daniel@ollama.com>
Date: Mon, 13 Nov 2023 12:25:58 -0800
Subject: [PATCH] Expose callable API for server

This adds an extern "C" interface within the example server
---
 examples/server/CMakeLists.txt |  24 ++++
 examples/server/server.cpp     | 247 +++++++++++++++++++++++++++++++++
 examples/server/server.h       |  83 +++++++++++
 ggml-cuda.cu                   |   1 +
 4 files changed, 355 insertions(+)
 create mode 100644 examples/server/server.h
diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt
index 859cd12..4ea47a7 100644
--- a/examples/server/CMakeLists.txt
+++ b/examples/server/CMakeLists.txt
@@ -11,3 +11,27 @@ if (WIN32)
     TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
 endif()
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
+
+set(TARGET ext_server)
+option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
+add_library(${TARGET} STATIC server.cpp)
+target_include_directories(${TARGET} PRIVATE ../../common)
+target_include_directories(${TARGET} PRIVATE ../..)
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_definitions(${TARGET} PUBLIC LLAMA_SERVER_LIBRARY=1)
+target_link_libraries(${TARGET} PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT})
+
+if (BUILD_SHARED_LIBS)
+    set_target_properties(ext_server PROPERTIES POSITION_INDEPENDENT_CODE ON)
+    target_compile_definitions(ext_server PRIVATE LLAMA_SHARED LLAMA_BUILD)
+    add_library(ext_server_shared SHARED $<TARGET_OBJECTS:ext_server>)
+    target_link_libraries(ext_server_shared PRIVATE ggml llama llava common ${CMAKE_THREAD_LIBS_INIT})
+    install(TARGETS ext_server_shared LIBRARY)
+endif()
+
+if (CUDAToolkit_FOUND)
+    target_include_directories(${TARGET} PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
+    if (WIN32)
+        target_link_libraries(ext_server_shared PRIVATE nvml)
+    endif()
+endif()
\ No newline at end of file
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 895f751..f939590 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -5,6 +5,9 @@
 #include "../llava/clip.h"
 
 #include "stb_image.h"
+#if defined(LLAMA_SERVER_LIBRARY)
+#include "server.h"
+#endif
 
 #ifndef NDEBUG
 // crash the server in debug mode, otherwise send an http 500 error
@@ -2631,6 +2634,7 @@ static void append_to_generated_text_from_generated_token_probs(llama_server_con
     }
 }
 
+#ifndef LLAMA_SERVER_LIBRARY
 int main(int argc, char **argv)
 {
     // own arguments required by this example
@@ -3065,3 +3069,246 @@ int main(int argc, char **argv)
     llama_backend_free();
     return 0;
 }
+
+#else // LLAMA_SERVER_LIBRARY
+// Expose the llama server as a callable extern "C" API
+llama_server_context llama;
+std::atomic<bool> ext_server_running(false);
+std::thread ext_server_thread;
+inline ext_server_err makeErr(uint32_t code, std::string msg) {
+    if (code == 0) {
+        return ext_server_err{0, NULL};
+    }
+    const std::string::size_type size = msg.size();
+    ext_server_err ret = {
+        code,
+        new char[size + 1],
+    };
+    memcpy(ret.err, msg.c_str(), size + 1);
+    return ret;
+}
+
+ext_server_err llama_server_init(ext_server_params *sparams)
+{
+    log_set_target(stdout);
+    gpt_params params;
+    params.n_ctx = sparams->n_ctx;
+    params.n_batch = sparams->n_batch;
+    params.n_threads = sparams->n_threads;
+    params.n_parallel = sparams->n_parallel;
+    params.rope_freq_base = sparams->rope_freq_base;
+    params.rope_freq_scale = sparams->rope_freq_scale;
+
+    if (sparams->memory_f16) {
+        params.cache_type_k = "f16";
+        params.cache_type_v = "f16";
+    } else {
+        params.cache_type_k = "f32";
+        params.cache_type_v = "f32";
+    }
+
+    params.n_gpu_layers = sparams->n_gpu_layers;
+    params.main_gpu = sparams->main_gpu;
+    params.use_mlock = sparams->use_mlock;
+    params.use_mmap = sparams->use_mmap;
+    params.numa = sparams->numa;
+    params.embedding = sparams->embedding;
+    if (sparams->model != NULL) {
+        params.model = sparams->model;
+    }
+
+    for (ext_server_lora_adapter *la = sparams->lora_adapters; la != NULL; la = la->next) {
+        params.lora_adapter.push_back(std::make_tuple(la->adapter, la->scale));
+    }
+
+    try {
+        llama_backend_init(params.numa);
+
+        // load the model
+        if (!llama.load_model(params))
+        {
+            // TODO - consider modifying the logging logic or patching load_model so we can capture more detailed error messages
+            // and pass them back to the caller for better UX
+            return makeErr(1, "error loading model " + params.model);
+        }
+
+        llama.initialize();
+    } catch (std::exception &e) {
+        return makeErr(1, e.what());
+    } catch (...) {
+        return makeErr(1, "Unknown Exception initializing llama server");
+    }
+    return makeErr(0, "");
+}
+
+void llama_server_start()
+{
+    // TODO mutex to protect thread creation
+    ext_server_thread = std::thread([&]()
+    {
+        ext_server_running = true;
+        try {
+            LOG_TEE("llama server main loop starting\n");
+            ggml_time_init();
+            while (ext_server_running.load())
+            {
+                if (!llama.update_slots()) {
+                    LOG_TEE("unexpected error in llama server update_slots - exiting main loop\n");
+                    break;
+                }
+            }
+        } catch (std::exception &e) {
+            LOG_TEE("caught exception in llama server main loop: %s\n", e.what());
+        } catch (...) {
+            LOG_TEE("caught unknown exception in llama server main loop\n");
+        }
+        LOG_TEE("\nllama server shutting down\n");
+        llama_backend_free();
+    });
+}
+
+void llama_server_stop() {
+    // TODO - too verbose, remove once things are solid
+    LOG_TEE("requesting llama server shutdown\n");
+    ext_server_running = false;
+    ext_server_thread.join();
+    LOG_TEE("llama server shutdown complete\n");
+}
+
+ext_server_completion_resp llama_server_completion(const char *json_req) {
+    std::string msg;
+    ext_server_completion_resp resp = {
+        0,
+        NULL,
+    };
+    try {
+        json data = json::parse(json_req);
+        resp.task_id = llama.request_completion(data, false, false, -1);
+        return resp;
+    } catch (std::exception &e) {
+        msg = e.what();
+    } catch (...) {
+        msg = "Unknown Exception during completion";
+    }
+    const std::string::size_type size = msg.size();
+    resp.task_id = 0;
+    resp.err = new char[size + 1];
+    memcpy(resp.err, msg.c_str(), size + 1);
+    return resp;
+}
+
+ext_task_result llama_server_completion_next_result(const int task_id) {
+    std::string msg;
+    ext_task_result resp = {-1,false,false,NULL};
+    try {
+        task_result result = llama.next_result(task_id);
+        std::string result_json = result.result_json.dump(-1, ' ', false, json::error_handler_t::replace);
+        const std::string::size_type size = result_json.size();
+        resp.id = result.id;
+        resp.stop = result.stop;
+        resp.error = result.error;
+        resp.result_json = new char[size + 1];
+        memcpy(resp.result_json, result_json.c_str(), size + 1);
+        if (result.error) {
+            llama.request_cancel(task_id);
+        } else if (result.stop) {
+            llama.request_cancel(task_id);
+        }
+        return resp;
+    } catch (std::exception &e) {
+        msg = e.what(); // TODO - json?
+    } catch (...) {
+        msg = "Unknown Exception during completion";
+    }
+    resp.error = true;
+    const std::string::size_type size = msg.size();
+    resp.result_json = new char[size + 1];
+    memcpy(resp.result_json, msg.c_str(), size + 1);
+    return resp;
+}
+
+ext_server_err llama_server_completion_cancel(const int task_id) {
+    try {
+        llama.request_cancel(task_id);
+    } catch (std::exception &e) {
+        return makeErr(1, e.what());
+    } catch (...) {
+        return makeErr(1, "Unknown Exception running llama server");
+    }
+    return makeErr(0, "");
+}
+
+
+ext_server_err llama_server_tokenize(const char *json_req, ext_server_resp *resp) {
+    resp->json_resp = NULL;
+    try {
+        const json body = json::parse(json_req);
+        std::vector<llama_token> tokens;
+        if (body.count("content") != 0)
+        {
+            tokens = llama.tokenize(body["content"], false);
+        }
+        const json data = format_tokenizer_response(tokens);
+        std::string result_json = data.dump();
+        const std::string::size_type size = result_json.size();
+        resp->json_resp = new char[size + 1];
+        memcpy(resp->json_resp, result_json.c_str(), size + 1);
+    } catch (std::exception &e) {
+        return makeErr(1, e.what());
+    } catch (...) {
+        return makeErr(1, "Unknown Exception during tokenize");
+    }
+    return makeErr(0, "");
+}
+
+ext_server_err llama_server_detokenize(const char *json_req, ext_server_resp *resp) {
+    resp->json_resp = NULL;
+    try {
+        const json body = json::parse(json_req);
+        std::string content;
+        if (body.count("tokens") != 0)
+        {
+            const std::vector<llama_token> tokens = body["tokens"];
+            content = tokens_to_str(llama.ctx, tokens.cbegin(), tokens.cend());
+        }
+        const json data = format_detokenized_response(content);
+        std::string result_json = data.dump();
+        const std::string::size_type size = result_json.size();
+        resp->json_resp = new char[size + 1];
+        memcpy(resp->json_resp, result_json.c_str(), size + 1);
+    } catch (std::exception &e) {
+        return makeErr(1, e.what());
+    } catch (...) {
+        return makeErr(1, "Unknown Exception during detokenize");
+    }
+    return makeErr(0, "");
+}
+
+ext_server_err llama_server_embedding(const char *json_req, ext_server_resp *resp) {
+    resp->json_resp = NULL;
+    try {
+        const json body = json::parse(json_req);
+        json prompt;
+        if (body.count("content") != 0)
+        {
+            prompt = body["content"];
+        }
+        else
+        {
+            prompt = "";
+        }
+        const int task_id = llama.request_completion({ {"prompt", prompt}, { "n_predict", 0} }, false, true, -1);
+        task_result result = llama.next_result(task_id);
+        std::string result_json = result.result_json.dump();
+        const std::string::size_type size = result_json.size();
+        resp->json_resp = new char[size + 1];
+        memcpy(resp->json_resp, result_json.c_str(), size + 1);
+    } catch (std::exception &e) {
+        return makeErr(1, e.what());
+    } catch (...) {
  306. + return makeErr(1, "Unknown Exception during detokenize");
+    }
+    return makeErr(0, "");
+}
+
+#endif // LLAMA_SERVER_LIBRARY
\ No newline at end of file
diff --git a/examples/server/server.h b/examples/server/server.h
new file mode 100644
index 0000000..4d03b1e
--- /dev/null
+++ b/examples/server/server.h
@@ -0,0 +1,83 @@
+#if defined(LLAMA_SERVER_LIBRARY)
+#ifndef LLAMA_SERVER_H
+#define LLAMA_SERVER_H
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdbool.h>
+
+// This exposes extern C entrypoints into the llama_server
+// To enable the server compile with LLAMA_SERVER_LIBRARY
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+    // TODO - clean the type def's up a bit for better consistency
+    typedef struct ext_server_err {
+        uint32_t code; // 0 on success, > 0 on error
+        char *err;     // null if code == 0; else contains error message. Caller responsible for freeing memory
+    } ext_server_err;
+
+    typedef struct ext_server_lora_adapter {
+        char *adapter;
+        float scale;
+        struct ext_server_lora_adapter *next;
+    } ext_server_lora_adapter;
+    typedef struct ext_server_params
+    {
+        char *model;
+        uint32_t n_ctx;       // text context, 0 = from model
+        uint32_t n_batch;     // prompt processing maximum batch size
+        uint32_t n_threads;   // number of threads to use for generation
+        int32_t n_parallel;   // number of parallel sequences to decode
+        float rope_freq_base; // RoPE base frequency, 0 = from model
+        float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model
+        bool memory_f16;      // use f16 instead of f32 for memory kv
+        int32_t n_gpu_layers; // number of layers to store in VRAM (-1 - use default)
+        int32_t main_gpu;     // the GPU that is used for scratch and small tensors
+        bool use_mlock;       // force system to keep model in RAM
+        bool use_mmap;        // use mmap if possible
+        bool numa;            // attempt optimizations that help on some NUMA systems
+        bool embedding;       // get only sentence embedding
+        ext_server_lora_adapter* lora_adapters;
+    } ext_server_params;
+
+    // Initialize the server once per process
+    ext_server_err llama_server_init(ext_server_params *sparams);
+
+    // Run the main loop
+    void llama_server_start();
+    // Stop the main loop
+    void llama_server_stop();
+
+    typedef struct ext_task_result
+    {
+        int id;
+        bool stop;
+        bool error;
+        char* result_json; // caller responsible to free this memory
+    } ext_task_result;
+
+    typedef struct ext_server_completion_resp {
+        int task_id; // < 0 on error, >= 0 on success
+        char *err;   // null if task_id >= 0; else contains error message. Caller responsible for freeing memory
+    } ext_server_completion_resp;
+    ext_server_completion_resp llama_server_completion(const char *json_req);
+    ext_task_result llama_server_completion_next_result(const int task_id);
+    ext_server_err llama_server_completion_cancel(const int task_id);
+
+    // Caller responsible for freeing json_resp
+    typedef struct ext_server_resp {
+        char *json_resp; // Caller responsible for freeing string
+    } ext_server_resp;
+    ext_server_err llama_server_tokenize(const char *json_req, ext_server_resp *resp);
+    ext_server_err llama_server_detokenize(const char *json_req, ext_server_resp *resp);
+    ext_server_err llama_server_embedding(const char *json_req, ext_server_resp *resp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+#endif // LLAMA_SERVER_LIBRARY
\ No newline at end of file
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 85f7a29..ce51364 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -6410,6 +6410,7 @@ static cudaError_t ggml_cuda_cpy_tensor_2d(
         CUDA_CHECK(cudaGetDevice(&id));
         src_ptr = (char *) extra->data_device[id];
     } else {
+        fprintf(stderr, "ggml_cuda_cpy_tensor_2d assert: backend: %d\n", src->backend);
         GGML_ASSERT(false);
     }
     char * dst_ptr = (char *) dst;
--
2.39.3 (Apple Git-145)
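
For context, a minimal host-side sketch of how a caller might drive the API this patch exposes is shown below. It is not part of the patch: the model path, prompt JSON, and parameter values are placeholders, and it assumes a C++ caller compiled with -DLLAMA_SERVER_LIBRARY (so the declarations in server.h are visible) and linked against the ext_server target, which means the buffers allocated with new char[] in server.cpp can be released with delete[].

// Hypothetical caller sketch; not included in the patch.
#include <cstdio>
#include <cstring>
#include "server.h"

int main() {
    // Zero-initialize so unused fields (lora_adapters, rope settings, ...) are off/default.
    ext_server_params sparams;
    memset(&sparams, 0, sizeof(sparams));
    sparams.model        = (char *)"/path/to/model.gguf"; // placeholder path
    sparams.n_ctx        = 2048;
    sparams.n_batch      = 512;
    sparams.n_threads    = 4;
    sparams.n_parallel   = 1;
    sparams.memory_f16   = true;
    sparams.use_mmap     = true;
    sparams.n_gpu_layers = -1; // use default

    ext_server_err err = llama_server_init(&sparams);
    if (err.code != 0) {
        fprintf(stderr, "init failed: %s\n", err.err);
        delete[] err.err; // error strings are allocated with new char[] in server.cpp
        return 1;
    }
    llama_server_start(); // runs update_slots() on a background thread

    // Request body follows the example server's completion JSON format.
    ext_server_completion_resp comp =
        llama_server_completion("{\"prompt\": \"Hello\", \"n_predict\": 16}");
    if (comp.err != NULL) {
        fprintf(stderr, "completion failed: %s\n", comp.err);
        delete[] comp.err;
        llama_server_stop();
        return 1;
    }

    // Poll for streamed results until the task reports stop or error.
    for (;;) {
        ext_task_result res = llama_server_completion_next_result(comp.task_id);
        printf("%s\n", res.result_json);
        bool done = res.stop || res.error;
        delete[] res.result_json;
        if (done) break;
    }

    llama_server_stop();
    return 0;
}

Because every returned string is allocated with new char[], a plain C consumer has no matching deallocation entry point; a later revision of this interface would need to export a free function alongside the calls above.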