- From 90c332fe2ef61149b38561d02836e66715df214d Mon Sep 17 00:00:00 2001
- From: Daniel Hiltgen <daniel@ollama.com>
- Date: Mon, 13 Nov 2023 12:25:58 -0800
- Subject: [PATCH] Expose callable API for server
- This adds an extern "C" interface to the example server so it can be built as a library and driven directly by a host application.
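- 
- An illustrative host-side lifecycle (a sketch only: the include path, model
- path, and buffer sizes are placeholders, and error handling is elided):
- 
-     #include "server.h"   // built with LLAMA_SERVER_LIBRARY=1
- 
-     static char errbuf[2048];
- 
-     void run(void) {
-         ext_server_resp_t err = { 0, sizeof(errbuf), errbuf };
-         ext_server_params_t sparams = {0};
-         char model[] = "/path/to/model.gguf";  // placeholder
-         sparams.model = model;
-         sparams.n_ctx = 2048;     // 0 would mean "from model"
-         sparams.n_batch = 512;
-         sparams.n_parallel = 1;
-         llama_server_init(&sparams, &err);
-         if (err.id != 0) return;  // err.msg holds the message
-         llama_server_start();     // spawns the update_slots() loop thread
-         // ... submit work via llama_server_completion() ...
-         llama_server_stop();      // joins the thread and frees the context
-     }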
- ---
- examples/server/CMakeLists.txt | 27 ++++
- examples/server/server.cpp | 280 +++++++++++++++++++++++++++++++++
- examples/server/server.h | 89 +++++++++++
- ggml-cuda.cu | 1 +
- 4 files changed, 397 insertions(+)
- create mode 100644 examples/server/server.h
- diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt
- index 859cd12..da2b9bf 100644
- --- a/examples/server/CMakeLists.txt
- +++ b/examples/server/CMakeLists.txt
- @@ -11,3 +11,30 @@ if (WIN32)
- TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
- endif()
- target_compile_features(${TARGET} PRIVATE cxx_std_11)
- +
- +set(TARGET ext_server)
- +option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
- +add_library(${TARGET} STATIC server.cpp)
- +target_include_directories(${TARGET} PRIVATE ../../common)
- +target_include_directories(${TARGET} PRIVATE ../..)
- +target_compile_features(${TARGET} PRIVATE cxx_std_11)
- +target_compile_definitions(${TARGET} PUBLIC LLAMA_SERVER_LIBRARY=1)
- +target_link_libraries(${TARGET} PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT})
- +target_compile_definitions(${TARGET} PRIVATE
- + SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
- +)
- +
- +if (BUILD_SHARED_LIBS)
- + set_target_properties(ext_server PROPERTIES POSITION_INDEPENDENT_CODE ON)
- + target_compile_definitions(ext_server PRIVATE LLAMA_SHARED LLAMA_BUILD)
- + add_library(ext_server_shared SHARED $<TARGET_OBJECTS:ext_server>)
- + target_link_libraries(ext_server_shared PRIVATE ggml llama llava common ${CMAKE_THREAD_LIBS_INIT})
- + install(TARGETS ext_server_shared LIBRARY)
- +endif()
- +
- +if (CUDAToolkit_FOUND)
- + target_include_directories(${TARGET} PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
- + if (WIN32)
- + target_link_libraries(ext_server_shared PRIVATE nvml)
- + endif()
- +endif()
- \ No newline at end of file
- diff --git a/examples/server/server.cpp b/examples/server/server.cpp
- index 0403853..07fb05c 100644
- --- a/examples/server/server.cpp
- +++ b/examples/server/server.cpp
- @@ -5,6 +5,9 @@
- #include "../llava/clip.h"
-
- #include "stb_image.h"
- +#if defined(LLAMA_SERVER_LIBRARY)
- +#include "server.h"
- +#endif
-
- #ifndef NDEBUG
- // crash the server in debug mode, otherwise send an http 500 error
- @@ -2643,6 +2646,7 @@ static void append_to_generated_text_from_generated_token_probs(llama_server_con
- }
- }
-
- +#ifndef LLAMA_SERVER_LIBRARY
- int main(int argc, char **argv)
- {
- #if SERVER_VERBOSE != 1
- @@ -3123,3 +3127,279 @@ int main(int argc, char **argv)
- llama_backend_free();
- return 0;
- }
- +
- +#else // LLAMA_SERVER_LIBRARY
- +// Expose the llama server as a callable extern "C" API
- +llama_server_context *llama = NULL;
- +std::atomic<bool> ext_server_running(false);
- +std::thread ext_server_thread;
- +
- +void llama_server_init(ext_server_params *sparams, ext_server_resp_t *err)
- +{
- +#if SERVER_VERBOSE != 1
- + LOG_TEE("disabling verbose llm logging\n");
- + log_disable();
- +#endif
- + assert(err != NULL && sparams != NULL);
- + err->id = 0;
- + err->msg[0] = '\0';
- + try {
- + llama = new llama_server_context;
- + log_set_target(stdout);
- + gpt_params params;
- + params.n_ctx = sparams->n_ctx;
- + params.n_batch = sparams->n_batch;
- + if (sparams->n_threads > 0) {
- + params.n_threads = sparams->n_threads;
- + }
- + params.n_parallel = sparams->n_parallel;
- + params.rope_freq_base = sparams->rope_freq_base;
- + params.rope_freq_scale = sparams->rope_freq_scale;
- +
- + if (sparams->memory_f16) {
- + params.cache_type_k = "f16";
- + params.cache_type_v = "f16";
- + } else {
- + params.cache_type_k = "f32";
- + params.cache_type_v = "f32";
- + }
- +
- + params.n_gpu_layers = sparams->n_gpu_layers;
- + params.main_gpu = sparams->main_gpu;
- + params.use_mlock = sparams->use_mlock;
- + params.use_mmap = sparams->use_mmap;
- + params.numa = sparams->numa;
- + params.embedding = sparams->embedding;
- + if (sparams->model != NULL) {
- + params.model = sparams->model;
- + }
- +
- + for (ext_server_lora_adapter *la = sparams->lora_adapters; la != NULL; la = la->next) {
- + params.lora_adapter.push_back(std::make_tuple(la->adapter, la->scale));
- + }
- +
- + if (sparams->mmproj != NULL) {
- + params.mmproj = std::string(sparams->mmproj);
- + }
- +
- + llama_backend_init(params.numa);
- +
- + // load the model
- + if (!llama->load_model(params))
- + {
- + // TODO - consider modifying the logging logic or patching load_model so we can capture more detailed error messages
- + // and pass them back to the caller for better UX
- + err->id = -1;
- + snprintf(err->msg, err->msg_len, "error loading model %s", params.model.c_str());
- + return;
- + }
- +
- + llama->initialize();
- + } catch (std::exception &e) {
- + err->id = -1;
- + snprintf(err->msg, err->msg_len, "exception %s", e.what());
- + } catch (...) {
- + err->id = -1;
- + snprintf(err->msg, err->msg_len, "Unknown exception initializing llama server");
- + }
- +}
- +
- +void llama_server_start()
- +{
- + assert(llama != NULL);
- + // TODO mutex to protect thread creation
- + ext_server_thread = std::thread([&]()
- + {
- + ext_server_running = true;
- + try {
- + LOG_TEE("llama server main loop starting\n");
- + ggml_time_init();
- + while (ext_server_running.load())
- + {
- + if (!llama->update_slots()) {
- + LOG_TEE("unexpected error in llama server update_slots - exiting main loop\n");
- + break;
- + }
- + }
- + } catch (std::exception &e) {
- + LOG_TEE("caught exception in llama server main loop: %s\n", e.what());
- + } catch (...) {
- + LOG_TEE("caught unknown exception in llama server main loop\n");
- + }
- + LOG_TEE("\nllama server shutting down\n");
- + llama_backend_free();
- + });
- +}
- +
- +void llama_server_stop() {
- + assert(llama != NULL);
- + // TODO - too verbose, remove once things are solid
- + LOG_TEE("requesting llama server shutdown\n");
- + ext_server_running = false;
- + ext_server_thread.join();
- + delete llama;
- + llama = NULL;
- + LOG_TEE("llama server shutdown complete\n");
- +}
- +
- +void llama_server_completion(const char *json_req, ext_server_resp_t *resp) {
- + assert(llama != NULL && json_req != NULL && resp != NULL);
- + resp->id = -1;
- + resp->msg[0] = '\0';
- + try {
- + json data = json::parse(json_req);
- + resp->id = llama->request_completion(data, false, false, -1);
- + } catch (std::exception &e) {
- + snprintf(resp->msg, resp->msg_len, "exception %s", e.what());
- + } catch (...) {
- + snprintf(resp->msg, resp->msg_len, "Unknown exception during completion");
- + }
- +}
- +
- +void llama_server_completion_next_result(const int task_id, ext_server_task_result_t *resp) {
- + assert(llama != NULL && resp != NULL);
- + resp->id = -1;
- + resp->stop = false;
- + resp->error = false;
- + resp->json_resp = NULL;
- + std::string result_json;
- + try {
- + task_result result = llama->next_result(task_id);
- + result_json = result.result_json.dump(-1, ' ', false, json::error_handler_t::replace);
- + resp->id = result.id;
- + resp->stop = result.stop;
- + resp->error = result.error;
- + if (result.error || result.stop) {
- + // the task is finished either way; cancel so the slot is released
- + llama->request_cancel(task_id);
- + }
- + } catch (std::exception &e) {
- + resp->error = true;
- + resp->id = -1;
- + result_json = "{\"error\":\"exception " + std::string(e.what()) + "\"}";
- + } catch (...) {
- + resp->error = true;
- + resp->id = -1;
- + result_json = "{\"error\":\"Unknown exception during completion\"}";
- + }
- + const std::string::size_type size = result_json.size() + 1;
- + resp->json_resp = new char[size];
- + snprintf(resp->json_resp, size, "%s", result_json.c_str());
- +}
- +
- +void llama_server_release_task_result(ext_server_task_result_t *result) {
- + if (result == NULL || result->json_resp == NULL) {
- + return;
- + }
- + delete[] result->json_resp;
- +}
- +
- +void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err) {
- + assert(llama != NULL && err != NULL);
- + err->id = 0;
- + err->msg[0] = '\0';
- + try {
- + llama->request_cancel(task_id);
- + } catch (std::exception &e) {
- + err->id = -1;
- + snprintf(err->msg, err->msg_len, "exception %s", e.what());
- + } catch (...) {
- + err->id = -1;
- + snprintf(err->msg, err->msg_len, "Unknown exception completion cancel in llama server");
- + }
- +}
- +
- +void llama_server_tokenize(const char *json_req, char **json_resp, ext_server_resp_t *err) {
- + assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
- + *json_resp = NULL;
- + err->id = 0;
- + err->msg[0] = '\0';
- + try {
- + const json body = json::parse(json_req);
- + std::vector<llama_token> tokens;
- + if (body.count("content") != 0)
- + {
- + tokens = llama->tokenize(body["content"], false);
- + }
- + const json data = format_tokenizer_response(tokens);
- + std::string result_json = data.dump();
- + const std::string::size_type size = result_json.size() + 1;
- + *json_resp = new char[size];
- + snprintf(*json_resp, size, "%s", result_json.c_str());
- + } catch (std::exception &e) {
- + err->id = -1;
- + snprintf(err->msg, err->msg_len, "exception %s", e.what());
- + } catch (...) {
- + err->id = -1;
- + snprintf(err->msg, err->msg_len, "Unknown exception during tokenize");
- + }
- +}
- +
- +void llama_server_release_json_resp(char **json_resp) {
- + if (json_resp == NULL || *json_resp == NULL) {
- + return;
- + }
- + delete[] *json_resp;
- +}
- +
- +void llama_server_detokenize(const char *json_req, char **json_resp, ext_server_resp_t *err) {
- + assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
- + *json_resp = NULL;
- + err->id = 0;
- + err->msg[0] = '\0';
- + try {
- + const json body = json::parse(json_req);
- + std::string content;
- + if (body.count("tokens") != 0)
- + {
- + const std::vector<llama_token> tokens = body["tokens"];
- + content = tokens_to_str(llama->ctx, tokens.cbegin(), tokens.cend());
- + }
- + const json data = format_detokenized_response(content);
- + std::string result_json = data.dump();
- + const std::string::size_type size = result_json.size() + 1;
- + *json_resp = new char[size];
- + snprintf(*json_resp, size, "%s", result_json.c_str());
- + } catch (std::exception &e) {
- + err->id = -1;
- + snprintf(err->msg, err->msg_len, "exception %s", e.what());
- + } catch (...) {
- + err->id = -1;
- + snprintf(err->msg, err->msg_len, "Unknown exception during detokenize");
- + }
- +}
- +
- +void llama_server_embedding(const char *json_req, char** json_resp, ext_server_resp_t *err) {
- + assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
- + *json_resp = NULL;
- + err->id = 0;
- + err->msg[0] = '\0';
- + try {
- + const json body = json::parse(json_req);
- + json prompt;
- + if (body.count("content") != 0)
- + {
- + prompt = body["content"];
- + }
- + else
- + {
- + prompt = "";
- + }
- + const int task_id = llama->request_completion({ {"prompt", prompt}, { "n_predict", 0} }, false, true, -1);
- + task_result result = llama->next_result(task_id);
- + std::string result_json = result.result_json.dump();
- + const std::string::size_type size = result_json.size() + 1;
- + *json_resp = new char[size];
- + snprintf(*json_resp, size, "%s", result_json.c_str());
- + } catch (std::exception &e) {
- + err->id = -1;
- + snprintf(err->msg, err->msg_len, "exception %s", e.what());
- + } catch (...) {
- + err->id = -1;
- + snprintf(err->msg, err->msg_len, "Unknown exception during embedding");
- + }
- +}
- +
- +#endif // LLAMA_SERVER_LIBRARY
- \ No newline at end of file
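- 
- The completion API above is pull-based: the caller submits a request, then
- polls for results until stop or error. A minimal sketch of that loop
- (hypothetical host code; the request shape follows the existing /completion
- endpoint, and the server is assumed already initialized and started):
- 
-     #include <stdio.h>
-     #include "server.h"
- 
-     static char errbuf[2048];
- 
-     void complete_once(void) {
-         ext_server_resp_t resp = { 0, sizeof(errbuf), errbuf };
-         llama_server_completion("{\"prompt\":\"Hello\",\"n_predict\":32}", &resp);
-         if (resp.id < 0) return;  // resp.msg holds the error
- 
-         ext_server_task_result_t result;
-         do {
-             llama_server_completion_next_result(resp.id, &result);
-             // json_resp is a null-terminated JSON chunk owned by ext_server
-             fputs(result.json_resp, stderr);
-             llama_server_release_task_result(&result);  // frees only json_resp
-         } while (!result.stop && !result.error);
-     }
- 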
- diff --git a/examples/server/server.h b/examples/server/server.h
- new file mode 100644
- index 0000000..d22f1b6
- --- /dev/null
- +++ b/examples/server/server.h
- @@ -0,0 +1,89 @@
- +#if defined(LLAMA_SERVER_LIBRARY)
- +#ifndef LLAMA_SERVER_H
- +#define LLAMA_SERVER_H
- +#include <stddef.h>
- +#include <stdint.h>
- +#include <stdio.h>
- +#include <stdbool.h>
- +
- +// This exposes extern "C" entrypoints into the llama server.
- +// To enable them, compile with LLAMA_SERVER_LIBRARY defined.
- +
- +#ifdef __cplusplus
- +extern "C"
- +{
- +#endif
- + typedef struct ext_server_resp {
- + int id; // < 0 on error
- + size_t msg_len; // caller must allocate msg and set msg_len
- + char *msg;
- + } ext_server_resp_t;
- +
- + // Allocated and freed by caller
- + typedef struct ext_server_lora_adapter {
- + char *adapter;
- + float scale;
- + struct ext_server_lora_adapter *next;
- + } ext_server_lora_adapter_t;
- +
- + // Allocated and freed by caller
- + typedef struct ext_server_params
- + {
- + char *model;
- + uint32_t n_ctx; // text context, 0 = from model
- + uint32_t n_batch; // prompt processing maximum batch size
- + uint32_t n_threads; // number of threads to use for generation
- + int32_t n_parallel; // number of parallel sequences to decode
- + float rope_freq_base; // RoPE base frequency, 0 = from model
- + float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model
- + bool memory_f16; // use f16 instead of f32 for the KV cache
- + int32_t n_gpu_layers; // number of layers to store in VRAM (-1 - use default)
- + int32_t main_gpu; // the GPU that is used for scratch and small tensors
- + bool use_mlock; // force system to keep model in RAM
- + bool use_mmap; // use mmap if possible
- + bool numa; // attempt optimizations that help on some NUMA systems
- + bool embedding; // get only sentence embedding
- + ext_server_lora_adapter_t* lora_adapters;
- + char *mmproj;
- + } ext_server_params_t;
- +
- + typedef struct ext_server_task_result
- + {
- + int id;
- + bool stop;
- + bool error;
- + char* json_resp; // null terminated, memory managed by ext_server
- + } ext_server_task_result_t;
- +
- + // Initialize the server once per process
- + // err->id = 0 on success, and err->msg[0] = '\0'
- + // err->id != 0 on failure, and err->msg contains the error message
- + void llama_server_init(ext_server_params_t *sparams, ext_server_resp_t *err);
- +
- + // Run the main loop, called once per init
- + void llama_server_start();
- + // Stop the main loop and free resources allocated in init and start; llama_server_init must be called again before reuse
- + void llama_server_stop();
- +
- + // json_req null terminated string, memory managed by caller
- + // resp->id >= 0 on success (task ID)
- + // resp->id < 0 on error, and resp->msg contains error message
- + void llama_server_completion(const char *json_req, ext_server_resp_t *resp);
- +
- + // Caller must call llama_server_release_task_result to free resp->json_resp
- + void llama_server_completion_next_result(const int task_id, ext_server_task_result_t *result);
- + void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err);
- + void llama_server_release_task_result(ext_server_task_result_t *result);
- +
- + // Caller must call llama_server_release_json_resp to free json_resp on success (err->id == 0)
- + void llama_server_tokenize(const char *json_req, char **json_resp, ext_server_resp_t *err);
- + void llama_server_detokenize(const char *json_req, char **json_resp, ext_server_resp_t *err);
- + void llama_server_embedding(const char *json_req, char** json_resp, ext_server_resp_t *err);
- + void llama_server_release_json_resp(char **json_resp);
- +
- +#ifdef __cplusplus
- +}
- +#endif
- +
- +#endif
- +#endif // LLAMA_SERVER_LIBRARY
- \ No newline at end of file
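- 
- The char ** entrypoints hand ownership of the response string back through
- the out-parameter; a sketch of the tokenize round trip (hypothetical host
- code, with the request shape per the existing /tokenize endpoint):
- 
-     #include <stdio.h>
-     #include "server.h"
- 
-     static char errbuf[2048];
- 
-     void tokenize_demo(void) {
-         ext_server_resp_t err = { 0, sizeof(errbuf), errbuf };
-         char *json_resp = NULL;
-         llama_server_tokenize("{\"content\":\"Hello world\"}", &json_resp, &err);
-         if (err.id == 0 && json_resp != NULL) {
-             printf("%s\n", json_resp);  // e.g. {"tokens":[...]}
-             llama_server_release_json_resp(&json_resp);  // free via the API
-         }
-     }
- 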
- diff --git a/ggml-cuda.cu b/ggml-cuda.cu
- index f20846f..9640cf3 100644
- --- a/ggml-cuda.cu
- +++ b/ggml-cuda.cu
- @@ -6757,6 +6757,7 @@ static cudaError_t ggml_cuda_cpy_tensor_2d(
- CUDA_CHECK(cudaGetDevice(&id));
- src_ptr = (char *) extra->data_device[id];
- } else {
- + fprintf(stderr, "ggml_cuda_cpy_tensor_2d assert: backend: %d\n", src->backend);
- GGML_ASSERT(false);
- }
- char * dst_ptr = (char *) dst;
- --
- 2.39.3 (Apple Git-145)
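- 
- For completeness, embeddings follow the same char ** pattern; the entrypoint
- is intended for a server initialized with sparams.embedding = true (mirroring
- the server's --embedding flag), since internally it issues a completion with
- n_predict = 0 (sketch, hypothetical host code):
- 
-     static char errbuf[2048];
- 
-     void embedding_demo(void) {
-         ext_server_resp_t err = { 0, sizeof(errbuf), errbuf };
-         char *json_resp = NULL;
-         llama_server_embedding("{\"content\":\"Hello\"}", &json_resp, &err);
-         if (err.id == 0 && json_resp != NULL) {
-             printf("%s\n", json_resp);  // embedding vector as JSON
-             llama_server_release_json_resp(&json_resp);
-         }
-     }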