@@ -140,7 +140,6 @@ struct server_slot {
     std::vector<llama_token> cache_tokens;
     std::vector<completion_token_output> generated_token_probs;
 
-    bool infill         = false;
     bool embedding      = false;
     bool has_next_token = true;
     bool truncated      = false;
@@ -187,7 +186,6 @@ struct server_slot {
         n_past             = 0;
         n_sent_text        = 0;
         n_sent_token_probs = 0;
-        infill             = false;
         ga_i               = 0;
         n_past_se          = 0;
 
@@ -600,16 +598,6 @@ struct llama_server_context
             slot->params.n_predict = slot->n_predict;
         }
 
-        // infill
-        if (data.count("input_prefix") != 0)
-        {
-            slot->params.input_prefix = data["input_prefix"];
-        }
-        else
-        {
-            slot->params.input_prefix = "";
-        }
-
         if (data.count("input_suffix") != 0)
         {
             slot->params.input_suffix = data["input_suffix"];
@@ -897,15 +885,6 @@ struct llama_server_context
         system_need_update = true;
     }
 
-    void system_prompt_process(const json &sys_props) {
-        system_prompt  = sys_props.value("prompt", "");
-        name_user      = sys_props.value("anti_prompt", "");
-        name_assistant = sys_props.value("assistant_name", "");
-
-
-        system_prompt_notify();
-    }
-
     static size_t find_stopping_strings(const std::string &text, const size_t last_token_size,
                                         const stop_type type, server_slot &slot)
     {
@@ -1263,13 +1242,12 @@ struct llama_server_context
         queue_results.send(res);
     }
 
-    void request_completion(int task_id, json data, bool infill, bool embedding, int multitask_id)
+    void request_completion(int task_id, json data, bool embedding, int multitask_id)
    {
         task_server task;
         task.id = task_id;
         task.target_id = 0;
         task.data = std::move(data);
-        task.infill_mode = infill;
         task.embedding_mode = embedding;
         task.type = TASK_TYPE_COMPLETION;
         task.multitask_id = multitask_id;
@@ -1415,8 +1393,8 @@ struct llama_server_context
             json subtask_data = multiprompt_task.data;
             subtask_data["prompt"] = subtask_data["prompt"][i];
 
-            // subtasks inherit everything else (infill mode, embedding mode, etc.)
-            request_completion(subtask_ids[i], subtask_data, multiprompt_task.infill_mode, multiprompt_task.embedding_mode, multitask_id);
+            // subtasks inherit everything else (embedding mode, etc.)
+            request_completion(subtask_ids[i], subtask_data, multiprompt_task.embedding_mode, multitask_id);
         }
     }
 
@@ -1434,26 +1412,8 @@ struct llama_server_context
                     break;
                 }
 
-                if (task.data.contains("system_prompt"))
-                {
-                    if (!all_slots_are_idle) {
-                        send_error(task, "system prompt can only be updated when all slots are idle");
-                        break;
-                    }
-                    system_prompt_process(task.data["system_prompt"]);
-
-                    // reset cache_tokens for all slots
-                    for (server_slot &slot : slots)
-                    {
-                        slot.cache_tokens.clear();
-                        slot.n_past    = 0;
-                        slot.n_past_se = 0;
-                    }
-                }
-
                 slot->reset();
 
-                slot->infill       = task.infill_mode;
                 slot->embedding    = task.embedding_mode;
                 slot->task_id      = task.id;
                 slot->multitask_id = task.multitask_id;
@@ -1679,8 +1639,7 @@ struct llama_server_context
                 const bool has_prompt = slot.prompt.is_array() || (slot.prompt.is_string() && !slot.prompt.get<std::string>().empty()) || !slot.images.empty();
 
                 // empty prompt passed -> release the slot and send empty response
-                // note: infill mode allows empty prompt
-                if (slot.state == IDLE && slot.command == LOAD_PROMPT && !has_prompt && !slot.infill)
+                if (slot.state == IDLE && slot.command == LOAD_PROMPT && !has_prompt)
                 {
                     slot.release();
                     slot.print_timings();
@@ -1697,33 +1656,7 @@ struct llama_server_context
                     slot.t_start_process_prompt = ggml_time_us();
                     slot.t_start_genereration = 0;
 
-                    if (slot.infill)
-                    {
-                        bool suff_rm_leading_spc = true;
-                        if (params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1)
-                        {
-                            params.input_suffix.erase(0, 1);
-                            suff_rm_leading_spc = false;
-                        }
-                        auto prefix_tokens = tokenize(slot.params.input_prefix, false);
-                        auto suffix_tokens = tokenize(slot.params.input_suffix, false);
-
-                        const int space_token = 29871; // TODO: this should not be hardcoded
-                        if (suff_rm_leading_spc && !suffix_tokens.empty() && suffix_tokens[0] == space_token) {
-                            suffix_tokens.erase(suffix_tokens.begin());
-                        }
-
-                        prefix_tokens.insert(prefix_tokens.begin(), llama_token_prefix(model));
-                        prefix_tokens.insert(prefix_tokens.begin(), llama_token_bos(model)); // always add BOS
-                        prefix_tokens.insert(prefix_tokens.end(),   llama_token_suffix(model));
-                        prefix_tokens.insert(prefix_tokens.end(),   suffix_tokens.begin(), suffix_tokens.end());
-                        prefix_tokens.push_back(llama_token_middle(model));
-                        prompt_tokens = prefix_tokens;
-                    }
-                    else
-                    {
-                        prompt_tokens = tokenize(slot.prompt, system_prompt.empty() && add_bos_token);  // add BOS if there isn't system prompt
-                    }
+                    prompt_tokens = tokenize(slot.prompt, system_prompt.empty() && add_bos_token);  // add BOS if there isn't system prompt
 
                     slot.n_prompt_tokens = prompt_tokens.size();
 
@@ -2130,8 +2063,7 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
     printf("\n");
 }
 
-static void server_params_parse(int argc, char **argv, server_params &sparams,
-                                gpt_params &params, llama_server_context& llama)
+static void server_params_parse(int argc, char **argv, server_params &sparams, gpt_params &params)
 {
     gpt_params default_params;
     server_params default_sparams;
@@ -2546,27 +2478,6 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
             }
             params.n_predict = std::stoi(argv[i]);
         }
-        else if (arg == "-spf" || arg == "--system-prompt-file")
-        {
-            if (++i >= argc)
-            {
-                invalid_param = true;
-                break;
-            }
-            std::ifstream file(argv[i]);
-            if (!file) {
-                fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
-                invalid_param = true;
-                break;
-            }
-            std::string systm_content;
-            std::copy(
-                std::istreambuf_iterator<char>(file),
-                std::istreambuf_iterator<char>(),
-                std::back_inserter(systm_content)
-            );
-            llama.system_prompt_process(json::parse(systm_content));
-        }
         else if (arg == "-ctk" || arg == "--cache-type-k") {
             params.cache_type_k = argv[++i];
         }
@@ -2714,21 +2625,6 @@ static json format_partial_response(
     return res;
 }
 
-static json format_tokenizer_response(const std::vector<llama_token> &tokens)
-{
-    return json {
-        {"tokens", tokens}
-    };
-}
-
-static json format_detokenized_response(std::string content)
-{
-    return json {
-        {"content", content}
-    };
-}
-
-
 static void log_server_request(const httplib::Request &req, const httplib::Response &res)
 {
     // skip GH copilot requests when using default port
@@ -2818,7 +2714,7 @@ int main(int argc, char **argv) {
     // struct that contains llama context and inference
     llama_server_context llama;
 
-    server_params_parse(argc, argv, sparams, params, llama);
+    server_params_parse(argc, argv, sparams, params);
 
     if (params.model_alias == "unknown")
     {
@@ -3150,7 +3046,7 @@ int main(int argc, char **argv) {
             json data = json::parse(req.body);
             const int task_id = llama.queue_tasks.get_new_id();
             llama.queue_results.add_waiting_task_id(task_id);
-            llama.request_completion(task_id, data, false, false, -1);
+            llama.request_completion(task_id, data, false, -1);
             if (!json_value(data, "stream", false)) {
                 std::string completion_text;
                 task_result result = llama.queue_results.recv(task_id);
@@ -3218,34 +3114,6 @@ int main(int argc, char **argv) {
         }
     });
 
-    svr.Post("/tokenize", [&llama](const httplib::Request &req, httplib::Response &res)
-    {
-        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
-        const json body = json::parse(req.body);
-        std::vector<llama_token> tokens;
-        if (body.count("content") != 0)
-        {
-            tokens = llama.tokenize(body["content"], false);
-        }
-        const json data = format_tokenizer_response(tokens);
-        return res.set_content(data.dump(), "application/json; charset=utf-8");
-    });
-
-    svr.Post("/detokenize", [&llama](const httplib::Request &req, httplib::Response &res)
-    {
-        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
-        const json body = json::parse(req.body);
-        std::string content;
-        if (body.count("tokens") != 0)
-        {
-            const std::vector<llama_token> tokens = body["tokens"];
-            content = tokens_to_str(llama.ctx, tokens.cbegin(), tokens.cend());
-        }
-
-        const json data = format_detokenized_response(content);
-        return res.set_content(data.dump(), "application/json; charset=utf-8");
-    });
-
     svr.Post("/embedding", [&llama](const httplib::Request &req, httplib::Response &res)
     {
         res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
@@ -3272,7 +3140,7 @@ int main(int argc, char **argv) {
         // create and queue the task
         const int task_id = llama.queue_tasks.get_new_id();
         llama.queue_results.add_waiting_task_id(task_id);
-        llama.request_completion(task_id, { {"prompt", prompt}, { "n_predict", 0}, {"image_data", image_data} }, false, true, -1);
+        llama.request_completion(task_id, { {"prompt", prompt}, { "n_predict", 0}, {"image_data", image_data} }, true, -1);
 
         // get the result
         task_result result = llama.queue_results.recv(task_id);