- #include "ext_server.h"
- // Necessary evil since the server types are not defined in a header
- #include "server.cpp"
- // Low-level API access used to verify that the GPU is accessible
- #if defined(GGML_USE_CUBLAS)
- #if defined(GGML_USE_HIPBLAS)
- #include <hip/hip_runtime.h>
- #include <hipblas/hipblas.h>
- #include <hip/hip_fp16.h>
- #ifdef __HIP_PLATFORM_AMD__
- // for rocblas_initialize()
- #include "rocblas/rocblas.h"
- #endif // __HIP_PLATFORM_AMD__
- #define cudaGetDevice hipGetDevice
- #define cudaError_t hipError_t
- #define cudaSuccess hipSuccess
- #define cudaGetErrorString hipGetErrorString
- #else
- #include <cuda_runtime.h>
- #include <cublas_v2.h>
- #include <cuda_fp16.h>
- #endif // defined(GGML_USE_HIPBLAS)
- #endif // GGML_USE_CUBLAS
- // Expose the llama server as a callable extern "C" API
- llama_server_context *llama = NULL;
- std::atomic<bool> ext_server_running(false);
- std::thread ext_server_thread;
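-
- // Initialize the embedded llama server: map ext_server_params onto gpt_params,
- // optionally pre-check the GPU, load the model, and report failures via err.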
- void llama_server_init(ext_server_params *sparams, ext_server_resp_t *err) {
-   assert(err != NULL && sparams != NULL);
-   log_set_target(stderr);
-   if (!sparams->verbose_logging) {
-     log_disable();
-   }
-   LOG_TEE("system info: %s\n", llama_print_system_info());
-   err->id = 0;
-   err->msg[0] = '\0';
-   try {
-     llama = new llama_server_context;
-     gpt_params params;
-     params.n_ctx = sparams->n_ctx;
-     params.n_batch = sparams->n_batch;
-     if (sparams->n_threads > 0) {
-       params.n_threads = sparams->n_threads;
-     }
-     params.n_parallel = sparams->n_parallel;
-     params.rope_freq_base = sparams->rope_freq_base;
-     params.rope_freq_scale = sparams->rope_freq_scale;
-     if (sparams->memory_f16) {
-       params.cache_type_k = "f16";
-       params.cache_type_v = "f16";
-     } else {
-       params.cache_type_k = "f32";
-       params.cache_type_v = "f32";
-     }
-     params.n_gpu_layers = sparams->n_gpu_layers;
-     params.main_gpu = sparams->main_gpu;
-     params.use_mlock = sparams->use_mlock;
-     params.use_mmap = sparams->use_mmap;
-     params.numa = sparams->numa;
-     params.embedding = sparams->embedding;
-     if (sparams->model != NULL) {
-       params.model = sparams->model;
-     }
-     if (sparams->lora_adapters != NULL) {
-       for (ext_server_lora_adapter *la = sparams->lora_adapters; la != NULL;
-            la = la->next) {
-         params.lora_adapter.push_back(std::make_tuple(la->adapter, la->scale));
-       }
-       params.use_mmap = false;
-     }
-     if (sparams->mmproj != NULL) {
-       params.mmproj = std::string(sparams->mmproj);
-     }
- #if defined(GGML_USE_CUBLAS)
-     // Before attempting to init the backend, which will assert on error,
-     // verify that the CUDA/ROCm GPU is accessible.
-     LOG_TEE("Performing pre-initialization of GPU\n");
-     int id;
-     cudaError_t cudaErr = cudaGetDevice(&id);
-     if (cudaErr != cudaSuccess) {
-       err->id = -1;
-       snprintf(err->msg, err->msg_len, "Unable to init GPU: %s",
-                cudaGetErrorString(cudaErr));
-       return;
-     }
- #endif
-     llama_backend_init(params.numa);
-     // load the model
-     if (!llama->load_model(params)) {
-       // TODO - consider modifying the logging logic or patching load_model so
-       // we can capture more detailed error messages and pass them back to the
-       // caller for better UX
-       err->id = -1;
-       snprintf(err->msg, err->msg_len, "error loading model %s",
-                params.model.c_str());
-       return;
-     }
-     llama->initialize();
-   } catch (std::exception &e) {
-     err->id = -1;
-     snprintf(err->msg, err->msg_len, "exception %s", e.what());
-   } catch (...) {
-     err->id = -1;
-     snprintf(err->msg, err->msg_len,
-              "Unknown exception initializing llama server");
-   }
- }
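-
- // Spawn the background thread that drives the server's update_slots() loop
- // until llama_server_stop() is called.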
- void llama_server_start() {
-   assert(llama != NULL);
-   // TODO mutex to protect thread creation
-   ext_server_thread = std::thread([&]() {
-     ext_server_running = true;
-     try {
-       LOG_TEE("llama server main loop starting\n");
-       ggml_time_init();
-       while (ext_server_running.load()) {
-         if (!llama->update_slots()) {
-           LOG_TEE(
-               "unexpected error in llama server update_slots - exiting main "
-               "loop\n");
-           break;
-         }
-       }
-     } catch (std::exception &e) {
-       LOG_TEE("caught exception in llama server main loop: %s\n", e.what());
-     } catch (...) {
-       LOG_TEE("caught unknown exception in llama server main loop\n");
-     }
-     LOG_TEE("\nllama server shutting down\n");
-     llama_backend_free();
-   });
- }
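-
- // Signal the worker thread to exit, join it, and tear down the server context.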
- void llama_server_stop() {
-   assert(llama != NULL);
-   // TODO - too verbose, remove once things are solid
-   LOG_TEE("requesting llama server shutdown\n");
-   ext_server_running = false;
-   // unblocks the update_slots() loop so it can clean up and exit
-   llama->request_cancel(0);
-   ext_server_thread.join();
-   delete llama;
-   llama = NULL;
-   LOG_TEE("llama server shutdown complete\n");
- }
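-
- // Queue a completion request described by json_req; resp->id carries the task
- // id on success or -1 on failure.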
- void llama_server_completion(const char *json_req, ext_server_resp_t *resp) {
-   assert(llama != NULL && json_req != NULL && resp != NULL);
-   resp->id = -1;
-   resp->msg[0] = '\0';
-   try {
-     json data = json::parse(json_req);
-     resp->id = llama->request_completion(data, false, false, -1);
-   } catch (std::exception &e) {
-     snprintf(resp->msg, resp->msg_len, "exception %s", e.what());
-   } catch (...) {
-     snprintf(resp->msg, resp->msg_len, "Unknown exception during completion");
-   }
- }
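-
- // Fetch the next result for a running task; the JSON payload is returned as a
- // heap-allocated string in resp->json_resp and must be released by the caller.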
- void llama_server_completion_next_result(const int task_id,
-                                          ext_server_task_result_t *resp) {
-   assert(llama != NULL && resp != NULL);
-   std::string msg;
-   resp->id = -1;
-   resp->stop = false;
-   resp->error = false;
-   resp->json_resp = NULL;
-   std::string result_json;
-   try {
-     task_result result = llama->next_result(task_id);
-     result_json =
-         result.result_json.dump(-1, ' ', false, json::error_handler_t::replace);
-     resp->id = result.id;
-     resp->stop = result.stop;
-     resp->error = result.error;
-     if (result.error) {
-       llama->request_cancel(task_id);
-     } else if (result.stop) {
-       llama->request_cancel(task_id);
-     }
-   } catch (std::exception &e) {
-     resp->error = true;
-     resp->id = -1;
-     result_json = "{\"error\":\"exception " + std::string(e.what()) + "\"}";
-     LOG_TEE("llama server completion exception %s\n", e.what());
-   } catch (...) {
-     resp->error = true;
-     resp->id = -1;
-     result_json = "{\"error\":\"Unknown exception during completion\"}";
-     LOG_TEE("llama server completion unknown exception\n");
-   }
-   const std::string::size_type size = result_json.size() + 1;
-   resp->json_resp = new char[size];
-   snprintf(resp->json_resp, size, "%s", result_json.c_str());
- }
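-
- // Free the JSON buffer allocated by llama_server_completion_next_result().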
- void llama_server_release_task_result(ext_server_task_result_t *result) {
-   if (result == NULL || result->json_resp == NULL) {
-     return;
-   }
-   delete[] result->json_resp;
- }
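-
- // Cancel a running completion task by id.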
- void llama_server_completion_cancel(const int task_id, ext_server_resp_t *err) {
-   assert(llama != NULL && err != NULL);
-   err->id = 0;
-   err->msg[0] = '\0';
-   try {
-     llama->request_cancel(task_id);
-   } catch (std::exception &e) {
-     err->id = -1;
-     snprintf(err->msg, err->msg_len, "exception %s", e.what());
-   } catch (...) {
-     err->id = -1;
-     snprintf(err->msg, err->msg_len,
-              "Unknown exception during completion cancel in llama server");
-   }
- }
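-
- // Tokenize the "content" field of json_req and return the token list as JSON
- // in *json_resp.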
- void llama_server_tokenize(const char *json_req, char **json_resp,
-                            ext_server_resp_t *err) {
-   assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
-   *json_resp = NULL;
-   err->id = 0;
-   err->msg[0] = '\0';
-   try {
-     const json body = json::parse(json_req);
-     std::vector<llama_token> tokens;
-     if (body.count("content") != 0) {
-       tokens = llama->tokenize(body["content"], false);
-     }
-     const json data = format_tokenizer_response(tokens);
-     std::string result_json = data.dump();
-     const std::string::size_type size = result_json.size() + 1;
-     *json_resp = new char[size];
-     snprintf(*json_resp, size, "%s", result_json.c_str());
-   } catch (std::exception &e) {
-     err->id = -1;
-     snprintf(err->msg, err->msg_len, "exception %s", e.what());
-   } catch (...) {
-     err->id = -1;
-     snprintf(err->msg, err->msg_len, "Unknown exception during tokenize");
-   }
- }
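-
- // Free a JSON buffer allocated by the tokenize, detokenize, or embedding entry
- // points.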
- void llama_server_release_json_resp(char **json_resp) {
-   if (json_resp == NULL || *json_resp == NULL) {
-     return;
-   }
-   delete[] *json_resp;
- }
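-
- // Convert the "tokens" array of json_req back into text and return it as JSON
- // in *json_resp.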
- void llama_server_detokenize(const char *json_req, char **json_resp,
-                              ext_server_resp_t *err) {
-   assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
-   *json_resp = NULL;
-   err->id = 0;
-   err->msg[0] = '\0';
-   try {
-     const json body = json::parse(json_req);
-     std::string content;
-     if (body.count("tokens") != 0) {
-       const std::vector<llama_token> tokens = body["tokens"];
-       content = tokens_to_str(llama->ctx, tokens.cbegin(), tokens.cend());
-     }
-     const json data = format_detokenized_response(content);
-     std::string result_json = data.dump();
-     const std::string::size_type size = result_json.size() + 1;
-     *json_resp = new char[size];
-     snprintf(*json_resp, size, "%s", result_json.c_str());
-   } catch (std::exception &e) {
-     err->id = -1;
-     snprintf(err->msg, err->msg_len, "exception %s", e.what());
-   } catch (...) {
-     err->id = -1;
-     snprintf(err->msg, err->msg_len, "Unknown exception during detokenize");
-   }
- }
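-
- // Run an embedding-only request for the "content" field of json_req and return
- // the raw result JSON in *json_resp.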
- void llama_server_embedding(const char *json_req, char **json_resp,
-                             ext_server_resp_t *err) {
-   assert(llama != NULL && json_req != NULL && json_resp != NULL && err != NULL);
-   *json_resp = NULL;
-   err->id = 0;
-   err->msg[0] = '\0';
-   try {
-     const json body = json::parse(json_req);
-     json prompt;
-     if (body.count("content") != 0) {
-       prompt = body["content"];
-     } else {
-       prompt = "";
-     }
-     const int task_id = llama->request_completion(
-         {{"prompt", prompt}, {"n_predict", 0}}, false, true, -1);
-     task_result result = llama->next_result(task_id);
-     std::string result_json = result.result_json.dump();
-     const std::string::size_type size = result_json.size() + 1;
-     *json_resp = new char[size];
-     snprintf(*json_resp, size, "%s", result_json.c_str());
-   } catch (std::exception &e) {
-     err->id = -1;
-     snprintf(err->msg, err->msg_len, "exception %s", e.what());
-   } catch (...) {
-     err->id = -1;
-     snprintf(err->msg, err->msg_len, "Unknown exception during embedding");
-   }
- }
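
A minimal sketch of how a caller might drive this extern "C" API. The exact definitions of ext_server_params, ext_server_resp_t, and ext_server_task_result_t live in ext_server.h (not shown above), so the field names, the caller-supplied error buffer, and the model path below are assumptions inferred from how this file uses them rather than the header's actual contract.

// hypothetical_caller.cpp -- illustrative only, not part of the removed file
#include <cstdio>
#include <cstring>
#include "ext_server.h"

int main() {
  // Assumed: the caller owns the error-message buffer and advertises its size.
  char errbuf[2048];
  ext_server_resp_t err;
  err.id = 0;
  err.msg_len = sizeof(errbuf);
  err.msg = errbuf;

  // Assumed field names; zero-init then set only what this example needs.
  ext_server_params sparams;
  memset(&sparams, 0, sizeof(sparams));
  sparams.model = (char *)"/path/to/model.gguf";  // hypothetical path
  sparams.n_ctx = 2048;
  sparams.n_batch = 512;
  sparams.n_parallel = 1;
  sparams.n_gpu_layers = 0;
  sparams.use_mmap = true;
  sparams.verbose_logging = false;

  llama_server_init(&sparams, &err);
  if (err.id != 0) {
    fprintf(stderr, "init failed: %s\n", err.msg);
    return 1;
  }
  llama_server_start();

  // Submit a completion and drain its streamed results.
  ext_server_resp_t resp = err;  // reuse the same message buffer
  llama_server_completion("{\"prompt\": \"Why is the sky blue?\"}", &resp);
  if (resp.id < 0) {
    fprintf(stderr, "completion failed: %s\n", resp.msg);
  } else {
    ext_server_task_result_t result;
    do {
      llama_server_completion_next_result(resp.id, &result);
      printf("%s\n", result.json_resp);
      llama_server_release_task_result(&result);
    } while (!result.stop && !result.error);
  }

  llama_server_stop();
  return 0;
}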