// MIT License
// Copyright (c) 2023 Georgi Gerganov
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

#include "common.h"
#include "llama.h"
#include "grammar-parser.h"
#include "utils.hpp"

#include "../llava/clip.h"
#include "../llava/llava.h"

#include "stb_image.h"

#ifndef NDEBUG
// crash the server in debug mode, otherwise send an http 500 error
#define CPPHTTPLIB_NO_EXCEPTIONS 1
#endif
// increase max payload length to allow use of larger context size
#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
#include "httplib.h"
#include "json.hpp"

#if defined(_WIN32)
#include <windows.h>
#include <errhandlingapi.h>
#endif

#include <cstddef>
#include <thread>
#include <chrono>
#include <condition_variable>
#include <atomic>
#include <signal.h>

using json = nlohmann::json;
struct server_params {
    std::string hostname = "127.0.0.1";
    std::vector<std::string> api_keys;
    std::string public_path = "examples/server/public";
    int32_t port          = 8080;
    int32_t read_timeout  = 600;
    int32_t write_timeout = 600;
    bool slots_endpoint   = true;
    bool metrics_endpoint = false;
    int n_threads_http    = -1;
};

bool server_verbose = false;
bool server_log_json = false;

enum stop_type {
    STOP_FULL,
    STOP_PARTIAL,
};
// TODO: can become bool if no use is found for additional states
enum slot_state {
    IDLE,
    PROCESSING,
};

enum slot_command {
    NONE,
    LOAD_PROMPT,
    RELEASE,
};

struct slot_params {
    bool stream       = true;
    bool cache_prompt = false; // remember the prompt to avoid reprocessing the entire prompt
    uint32_t seed     = -1;    // RNG seed
    int32_t n_keep    = 0;     // number of tokens to keep from the initial prompt
    int32_t n_predict = -1;    // new tokens to predict
    std::vector<std::string> antiprompt;
    json input_prefix;
    json input_suffix;
};
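
// Illustrative example (not a normative API spec): a completion request body such as
//   { "prompt": "Hello", "stream": true, "cache_prompt": true, "n_predict": 64, "stop": ["\n"] }
// is mapped onto these fields by launch_slot_with_data() further below.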
struct slot_image {
    int32_t id;
    bool request_encode_image = false;
    float * image_embedding   = nullptr;
    int32_t image_tokens      = 0;
    clip_image_u8 * img_data;
    std::string prefix_prompt; // portion of the prompt that comes before this image
};
struct server_slot {
    int id;
    int task_id = -1;

    struct slot_params params;

    slot_state   state   = IDLE;
    slot_command command = NONE;

    // used to determine the slot that has been used the longest
    int64_t t_last_used = -1;

    // generation props
    int32_t n_ctx       = 0;  // context size per slot
    int32_t n_past      = 0;
    int32_t n_decoded   = 0;
    int32_t n_remaining = -1;
    int32_t i_batch     = -1;
    int32_t n_predict   = -1;

    int32_t n_prompt_tokens           = 0;
    int32_t n_prompt_tokens_processed = 0;

    json prompt;
    std::string generated_text;
    llama_token sampled;
    std::vector<llama_token> cache_tokens;
    std::vector<completion_token_output> generated_token_probs;

    bool embedding      = false;
    bool has_next_token = true;
    bool truncated      = false;
    bool stopped_eos    = false;
    bool stopped_word   = false;
    bool stopped_limit  = false;

    std::string stopping_word;

    // sampling
    struct llama_sampling_params sparams;
    llama_sampling_context *ctx_sampling = nullptr;

    int32_t ga_i = 0;      // group-attention state
    int32_t ga_n = 1;      // group-attention factor
    int32_t ga_w = 512;    // group-attention width
    int32_t n_past_se = 0; // self-extend

    // multimodal
    std::vector<slot_image> images;

    // stats
    size_t n_sent_text        = 0; // number of generated text characters sent to the client
    size_t n_sent_token_probs = 0;

    int64_t t_start_process_prompt;
    int64_t t_start_genereration;

    double t_prompt_processing; // ms
    double t_token_generation;  // ms

    // multitasks
    int multitask_id = -1;
    void reset() {
        n_prompt_tokens    = 0;
        generated_text     = "";
        truncated          = false;
        stopped_eos        = false;
        stopped_word       = false;
        stopped_limit      = false;
        stopping_word      = "";
        n_past             = 0;
        n_sent_text        = 0;
        n_sent_token_probs = 0;
        ga_i               = 0;
        n_past_se          = 0;

        generated_token_probs.clear();

        for (slot_image & img : images) {
            free(img.image_embedding);
            if (img.img_data) {
                clip_image_u8_free(img.img_data);
            }
            img.prefix_prompt = "";
        }

        images.clear();
    }
    bool has_budget(gpt_params &global_params) {
        if (params.n_predict == -1 && global_params.n_predict == -1) {
            return true; // limitless
        }

        n_remaining = -1;

        if (params.n_predict != -1) {
            n_remaining = params.n_predict - n_decoded;
        } else if (global_params.n_predict != -1) {
            n_remaining = global_params.n_predict - n_decoded;
        }

        return n_remaining > 0; // true while there is still budget left
    }
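
    // Worked example (illustrative): with params.n_predict = 128 and n_decoded = 100,
    // n_remaining becomes 28 and has_budget() returns true; once n_decoded reaches 128,
    // n_remaining is 0 and process_token() stops generation via stopped_limit.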
    bool available() const {
        return state == IDLE && command == NONE;
    }

    bool is_processing() const {
        return (state == IDLE && command == LOAD_PROMPT) || state == PROCESSING;
    }

    void add_token_string(const completion_token_output &token) {
        if (command == RELEASE) {
            return;
        }
        cache_tokens.push_back(token.tok);
        generated_token_probs.push_back(token);
    }

    void release() {
        if (state == PROCESSING)
        {
            t_token_generation = (ggml_time_us() - t_start_genereration) / 1e3;
            command = RELEASE;
        }
    }
    json get_formated_timings() {
        return json
        {
            {"prompt_n",               n_prompt_tokens_processed},
            {"prompt_ms",              t_prompt_processing},
            {"prompt_per_token_ms",    t_prompt_processing / n_prompt_tokens_processed},
            {"prompt_per_second",      1e3 / t_prompt_processing * n_prompt_tokens_processed},

            {"predicted_n",            n_decoded},
            {"predicted_ms",           t_token_generation},
            {"predicted_per_token_ms", t_token_generation / n_decoded},
            {"predicted_per_second",   1e3 / t_token_generation * n_decoded},
        };
    }
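
    // Example output (illustrative numbers): processing a 10-token prompt in 50 ms and
    // generating 20 tokens in 400 ms yields
    //   {"prompt_n": 10, "prompt_ms": 50.0, "prompt_per_token_ms": 5.0, "prompt_per_second": 200.0,
    //    "predicted_n": 20, "predicted_ms": 400.0, "predicted_per_token_ms": 20.0, "predicted_per_second": 50.0}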
    void print_timings() const {
        char buffer[512];

        double t_token = t_prompt_processing / n_prompt_tokens_processed;
        double n_tokens_second = 1e3 / t_prompt_processing * n_prompt_tokens_processed;
        snprintf(buffer, sizeof(buffer), "prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)",
                 t_prompt_processing, n_prompt_tokens_processed,
                 t_token, n_tokens_second);
        LOG_DEBUG(buffer, {
            {"slot_id",                   id},
            {"task_id",                   task_id},
            {"t_prompt_processing",       t_prompt_processing},
            {"n_prompt_tokens_processed", n_prompt_tokens_processed},
            {"t_token",                   t_token},
            {"n_tokens_second",           n_tokens_second},
        });

        t_token = t_token_generation / n_decoded;
        n_tokens_second = 1e3 / t_token_generation * n_decoded;
        snprintf(buffer, sizeof(buffer), "generation eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)",
                 t_token_generation, n_decoded,
                 t_token, n_tokens_second);
        LOG_DEBUG(buffer, {
            {"slot_id",            id},
            {"task_id",            task_id},
            {"t_token_generation", t_token_generation},
            {"n_decoded",          n_decoded},
            {"t_token",            t_token},
            {"n_tokens_second",    n_tokens_second},
        });

        snprintf(buffer, sizeof(buffer), " total time = %10.2f ms", t_prompt_processing + t_token_generation);
        LOG_DEBUG(buffer, {
            {"slot_id",             id},
            {"task_id",             task_id},
            {"t_prompt_processing", t_prompt_processing},
            {"t_token_generation",  t_token_generation},
            {"t_total",             t_prompt_processing + t_token_generation},
        });
    }
};
struct server_metrics {
    uint64_t n_prompt_tokens_processed_total = 0;
    uint64_t n_tokens_predicted_total        = 0;

    uint64_t n_prompt_tokens_processed = 0;
    uint64_t t_prompt_processing       = 0;

    uint64_t n_tokens_predicted  = 0;
    uint64_t t_tokens_generation = 0;

    void on_prompt_eval(const server_slot &slot) {
        n_prompt_tokens_processed_total += slot.n_prompt_tokens_processed;
        n_prompt_tokens_processed       += slot.n_prompt_tokens_processed;
        t_prompt_processing             += slot.t_prompt_processing;
    }

    void on_prediction(const server_slot &slot) {
        n_tokens_predicted_total += slot.n_decoded;
        n_tokens_predicted       += slot.n_decoded;
        t_tokens_generation      += slot.t_token_generation;
    }

    void reset_bucket() {
        n_prompt_tokens_processed = 0;
        t_prompt_processing       = 0;
        n_tokens_predicted        = 0;
        t_tokens_generation       = 0;
    }
};
struct llama_server_context
{
    llama_model *model = nullptr;
    float modelProgress = 0.0;
    llama_context *ctx = nullptr;
    clip_ctx *clp_ctx = nullptr;

    gpt_params params;

    llama_batch batch;

    bool multimodal         = false;
    bool clean_kv_cache     = true;
    bool all_slots_are_idle = false;
    bool add_bos_token      = true;

    int32_t n_ctx; // total context for all clients / slots

    // system prompt
    bool system_need_update = false;

    std::string              system_prompt;
    std::vector<llama_token> system_tokens;

    std::string name_user;      // this should be the antiprompt
    std::string name_assistant;

    // slots / clients
    std::vector<server_slot> slots;

    llama_server_queue    queue_tasks;
    llama_server_response queue_results;

    server_metrics metrics;

    ~llama_server_context()
    {
        if (clp_ctx)
        {
            LOG_DEBUG("freeing clip model", {});
            clip_free(clp_ctx);
            clp_ctx = nullptr;
        }
        if (ctx)
        {
            llama_free(ctx);
            ctx = nullptr;
        }
        if (model)
        {
            llama_free_model(model);
            model = nullptr;
        }
    }
    bool load_model(const gpt_params &params_)
    {
        params = params_;
        if (!params.mmproj.empty()) {
            multimodal = true;
            LOG_DEBUG("Multi Modal Mode Enabled", {});
            clp_ctx = clip_model_load(params.mmproj.c_str(), /*verbosity=*/ 1);
            if (clp_ctx == nullptr) {
                LOG_ERROR("unable to load clip model", {{"model", params.mmproj}});
                return false;
            }

            if (params.n_ctx < 2048) { // request larger context for the image embedding
                params.n_ctx = 2048;
            }
        }

        std::tie(model, ctx) = llama_init_from_gpt_params(params);
        if (model == nullptr)
        {
            LOG_ERROR("unable to load model", {{"model", params.model}});
            return false;
        }

        if (multimodal) {
            const int n_embd_clip = clip_n_mmproj_embd(clp_ctx);
            const int n_embd_llm  = llama_n_embd(model);
            if (n_embd_clip != n_embd_llm) {
                LOG_TEE("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_embd_clip, n_embd_llm);
                llama_free(ctx);
                llama_free_model(model);
                return false;
            }
        }

        n_ctx = llama_n_ctx(ctx);

        add_bos_token = llama_should_add_bos_token(model);

        return true;
    }
    void initialize() {
        // create slots
        all_slots_are_idle = true;

        const int32_t n_ctx_slot = n_ctx / params.n_parallel;

        LOG_DEBUG("initializing slots", {{"n_slots", params.n_parallel}});
        for (int i = 0; i < params.n_parallel; i++)
        {
            server_slot slot;

            slot.id        = i;
            slot.n_ctx     = n_ctx_slot;
            slot.n_predict = params.n_predict;

            LOG_DEBUG("new slot", {
                {"slot_id",    slot.id},
                {"n_ctx_slot", slot.n_ctx}
            });

            const int ga_n = params.grp_attn_n;
            const int ga_w = params.grp_attn_w;

            if (ga_n != 1) {
                GGML_ASSERT(ga_n > 0         && "ga_n must be positive");           // NOLINT
                GGML_ASSERT(ga_w % ga_n == 0 && "ga_w must be a multiple of ga_n"); // NOLINT
                //GGML_ASSERT(n_ctx_train % ga_w == 0     && "n_ctx_train must be a multiple of ga_w");    // NOLINT
                //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * ga_n"); // NOLINT

                LOG_DEBUG("slot self-extend", {
                    {"slot_id", slot.id},
                    {"ga_n",    ga_n},
                    {"ga_w",    ga_w}
                });
            }

            slot.ga_i = 0;
            slot.ga_n = ga_n;
            slot.ga_w = ga_w;

            slot.reset();

            slots.push_back(slot);
        }

        batch = llama_batch_init(n_ctx, 0, params.n_parallel);
    }
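
    // Example (illustrative): with n_ctx = 4096 and params.n_parallel = 2, each slot gets
    // n_ctx_slot = 2048 tokens of context, while the shared batch above is sized for the
    // full 4096-token context.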
    std::vector<llama_token> tokenize(const json & json_prompt, bool add_bos) const
    {
        // TODO: currently, we tokenize using special tokens by default
        // this is not always correct (see https://github.com/ggerganov/llama.cpp/pull/4160#issuecomment-1824826216)
        // but it's better compared to completely ignoring ChatML and other chat templates
        const bool TMP_FORCE_SPECIAL = true;

        // If `add_bos` is true, we only add BOS when json_prompt is a string,
        // or when the first element of the json_prompt array is a string.
        std::vector<llama_token> prompt_tokens;

        if (json_prompt.is_array())
        {
            bool first = true;
            for (const auto& p : json_prompt)
            {
                if (p.is_string())
                {
                    auto s = p.template get<std::string>();
                    std::vector<llama_token> toks; // renamed from `p`, which shadowed the loop variable
                    if (first)
                    {
                        toks = ::llama_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL);
                        first = false;
                    }
                    else
                    {
                        toks = ::llama_tokenize(ctx, s, false, TMP_FORCE_SPECIAL);
                    }
                    prompt_tokens.insert(prompt_tokens.end(), toks.begin(), toks.end());
                }
                else
                {
                    if (first)
                    {
                        first = false;
                    }
                    prompt_tokens.push_back(p.template get<llama_token>());
                }
            }
        }
        else
        {
            auto s = json_prompt.template get<std::string>();
            prompt_tokens = ::llama_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL);
        }

        return prompt_tokens;
    }
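
    // Illustrative prompt forms accepted here: a plain string ("Hello"), an array of
    // strings (["sys", "user"]), an array of token ids ([1, 15043]), or a mix of both;
    // only a string prompt (or the first string element of an array) receives the BOS
    // token when add_bos is true.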
    server_slot* get_slot(int id) {
        int64_t t_last = ggml_time_us();
        server_slot *last_used = nullptr;

        for (server_slot & slot : slots)
        {
            if (slot.id == id && slot.available())
            {
                return &slot;
            }

            if (slot.available() && slot.t_last_used < t_last)
            {
                last_used = &slot;
                t_last = slot.t_last_used;
            }
        }

        return last_used;
    }
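
    // Note: passing id = -1 never matches a slot id, so the call degrades to a
    // least-recently-used pick among available slots (smallest t_last_used wins).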
    bool launch_slot_with_data(server_slot* &slot, json data) {
        slot_params default_params;
        llama_sampling_params default_sparams;

        slot->params.stream             = json_value(data, "stream",            false);
        slot->params.cache_prompt       = json_value(data, "cache_prompt",      false);
        slot->params.n_predict          = json_value(data, "n_predict",         default_params.n_predict);
        slot->sparams.top_k             = json_value(data, "top_k",             default_sparams.top_k);
        slot->sparams.top_p             = json_value(data, "top_p",             default_sparams.top_p);
        slot->sparams.min_p             = json_value(data, "min_p",             default_sparams.min_p);
        slot->sparams.tfs_z             = json_value(data, "tfs_z",             default_sparams.tfs_z);
        slot->sparams.typical_p         = json_value(data, "typical_p",         default_sparams.typical_p);
        slot->sparams.temp              = json_value(data, "temperature",       default_sparams.temp);
        slot->sparams.dynatemp_range    = json_value(data, "dynatemp_range",    default_sparams.dynatemp_range);
        slot->sparams.dynatemp_exponent = json_value(data, "dynatemp_exponent", default_sparams.dynatemp_exponent);
        slot->sparams.penalty_last_n    = json_value(data, "repeat_last_n",     default_sparams.penalty_last_n);
        slot->sparams.penalty_repeat    = json_value(data, "repeat_penalty",    default_sparams.penalty_repeat);
        slot->sparams.penalty_freq      = json_value(data, "frequency_penalty", default_sparams.penalty_freq);
        slot->sparams.penalty_present   = json_value(data, "presence_penalty",  default_sparams.penalty_present);
        slot->sparams.mirostat          = json_value(data, "mirostat",          default_sparams.mirostat);
        slot->sparams.mirostat_tau      = json_value(data, "mirostat_tau",      default_sparams.mirostat_tau);
        slot->sparams.mirostat_eta      = json_value(data, "mirostat_eta",      default_sparams.mirostat_eta);
        slot->sparams.penalize_nl       = json_value(data, "penalize_nl",       default_sparams.penalize_nl);
        slot->params.n_keep             = json_value(data, "n_keep",            slot->params.n_keep);
        slot->sparams.seed              = json_value(data, "seed",              default_params.seed);
        slot->sparams.grammar           = json_value(data, "grammar",           default_sparams.grammar);
        slot->sparams.n_probs           = json_value(data, "n_probs",           default_sparams.n_probs);
        slot->sparams.min_keep          = json_value(data, "min_keep",          default_sparams.min_keep);

        if (slot->n_predict > 0 && slot->params.n_predict > slot->n_predict) {
            // Might be better to reject the request with a 400?
            LOG_WARNING("Max tokens to predict exceeds server configuration", {
                {"params.n_predict", slot->params.n_predict},
                {"slot.n_predict",   slot->n_predict},
            });
            slot->params.n_predict = slot->n_predict;
        }

        if (data.count("input_suffix") != 0)
        {
            slot->params.input_suffix = data["input_suffix"];
        }
        else
        {
            slot->params.input_suffix = "";
        }

        if (data.count("prompt") != 0)
        {
            slot->prompt = data["prompt"];
        }
        else
        {
            slot->prompt = "";
        }

        slot->sparams.penalty_prompt_tokens.clear();
        slot->sparams.use_penalty_prompt_tokens = false;
        const auto &penalty_prompt = data.find("penalty_prompt");
        if (penalty_prompt != data.end())
        {
            if (penalty_prompt->is_string())
            {
                const auto penalty_prompt_string = penalty_prompt->get<std::string>();
                auto penalty_tokens = llama_tokenize(model, penalty_prompt_string, false);
                slot->sparams.penalty_prompt_tokens.swap(penalty_tokens);
                if (slot->params.n_predict > 0)
                {
                    slot->sparams.penalty_prompt_tokens.reserve(slot->sparams.penalty_prompt_tokens.size() + slot->params.n_predict);
                }
                slot->sparams.use_penalty_prompt_tokens = true;
            }
            else if (penalty_prompt->is_array())
            {
                const auto n_tokens = penalty_prompt->size();
                slot->sparams.penalty_prompt_tokens.reserve(n_tokens + std::max(0, slot->params.n_predict));
                const int n_vocab = llama_n_vocab(model);
                for (const auto &penalty_token : *penalty_prompt)
                {
                    if (penalty_token.is_number_integer())
                    {
                        const auto tok = penalty_token.get<llama_token>();
                        if (tok >= 0 && tok < n_vocab)
                        {
                            slot->sparams.penalty_prompt_tokens.push_back(tok);
                        }
                    }
                }
                slot->sparams.use_penalty_prompt_tokens = true;
            }
        }
        slot->sparams.logit_bias.clear();

        if (json_value(data, "ignore_eos", false))
        {
            slot->sparams.logit_bias[llama_token_eos(model)] = -INFINITY;
        }

        const auto &logit_bias = data.find("logit_bias");
        if (logit_bias != data.end() && logit_bias->is_array())
        {
            const int n_vocab = llama_n_vocab(model);
            for (const auto &el : *logit_bias)
            {
                if (el.is_array() && el.size() == 2)
                {
                    float bias;
                    if (el[1].is_number())
                    {
                        bias = el[1].get<float>();
                    }
                    else if (el[1].is_boolean() && !el[1].get<bool>())
                    {
                        bias = -INFINITY;
                    }
                    else
                    {
                        continue;
                    }

                    if (el[0].is_number_integer())
                    {
                        llama_token tok = el[0].get<llama_token>();
                        if (tok >= 0 && tok < n_vocab)
                        {
                            slot->sparams.logit_bias[tok] = bias;
                        }
                    }
                    else if (el[0].is_string())
                    {
                        auto toks = llama_tokenize(model, el[0].get<std::string>(), false);
                        for (auto tok : toks)
                        {
                            slot->sparams.logit_bias[tok] = bias;
                        }
                    }
                }
            }
        }

        slot->params.antiprompt.clear();

        const auto &stop = data.find("stop");
        if (stop != data.end() && stop->is_array())
        {
            for (const auto &word : *stop)
            {
                if (!word.empty())
                {
                    slot->params.antiprompt.push_back(word);
                }
            }
        }

        const auto &samplers_sequence = data.find("samplers");
        if (samplers_sequence != data.end() && samplers_sequence->is_array())
        {
            std::vector<std::string> sampler_names;
            for (const auto &sampler_name : *samplers_sequence)
            {
                if (sampler_name.is_string())
                {
                    sampler_names.emplace_back(sampler_name);
                }
            }
            slot->sparams.samplers_sequence = llama_sampling_types_from_names(sampler_names, false);
        }
        else
        {
            slot->sparams.samplers_sequence = default_sparams.samplers_sequence;
        }
        if (multimodal)
        {
            const auto &images_data = data.find("image_data");
            if (images_data != data.end() && images_data->is_array())
            {
                for (const auto &img : *images_data)
                {
                    const std::vector<uint8_t> image_buffer = base64_decode(img["data"].get<std::string>());

                    slot_image img_sl;
                    img_sl.id = img.count("id") != 0 ? img["id"].get<int>() : slot->images.size();
                    img_sl.img_data = clip_image_u8_init();
                    if (!clip_image_load_from_bytes(image_buffer.data(), image_buffer.size(), img_sl.img_data))
                    {
                        LOG_ERROR("failed to load image", {
                            {"slot_id",   slot->id},
                            {"img_sl_id", img_sl.id}
                        });
                        return false;
                    }
                    LOG_VERBOSE("image loaded", {
                        {"slot_id",   slot->id},
                        {"img_sl_id", img_sl.id}
                    });
                    img_sl.request_encode_image = true;
                    slot->images.push_back(img_sl);
                }
                // process prompt
                // example: system prompt [img-102] user [img-103] describe [img-134] ->
                //          [{id: 102, prefix: 'system prompt '}, {id: 103, prefix: ' user '}, {id: 134, prefix: ' describe '}]
                if (slot->images.size() > 0 && !slot->prompt.is_array())
                {
                    std::string prompt = slot->prompt.get<std::string>();
                    size_t pos = 0, begin_prefix = 0;
                    std::string pattern = "[img-";
                    while ((pos = prompt.find(pattern, pos)) != std::string::npos) {
                        size_t end_prefix = pos;
                        pos += pattern.length();
                        size_t end_pos = prompt.find(']', pos);
                        if (end_pos != std::string::npos)
                        {
                            std::string image_id = prompt.substr(pos, end_pos - pos);
                            try
                            {
                                int img_id = std::stoi(image_id);
                                bool found = false;
                                for (slot_image &img : slot->images)
                                {
                                    if (img.id == img_id) {
                                        found = true;
                                        img.prefix_prompt = prompt.substr(begin_prefix, end_prefix - begin_prefix);
                                        begin_prefix = end_pos + 1;
                                        break;
                                    }
                                }
                                if (!found) {
                                    LOG_TEE("ERROR: Image with id: %i, not found.\n", img_id);
                                    slot->images.clear();
                                    return false;
                                }
                            } catch (const std::invalid_argument& e) {
                                LOG_TEE("Invalid image number id in prompt\n");
                                slot->images.clear();
                                return false;
                            }
                        }
                    }
                    slot->prompt = "";
                    slot->params.input_suffix = prompt.substr(begin_prefix);
                    slot->params.cache_prompt = false; // multimodal does not support prompt caching
                }
            }
        }

        if (slot->ctx_sampling != nullptr)
        {
            llama_sampling_free(slot->ctx_sampling);
        }
        slot->ctx_sampling = llama_sampling_init(slot->sparams);
        slot->command = LOAD_PROMPT;

        all_slots_are_idle = false;

        LOG_DEBUG("slot is processing task", {
            {"slot_id", slot->id},
            {"task_id", slot->task_id},
        });

        return true;
    }
    void kv_cache_clear() {
        // clear the entire KV cache
        llama_kv_cache_clear(ctx);
        clean_kv_cache = false;
    }

    void system_prompt_update() {
        kv_cache_clear();
        system_tokens.clear();

        if (!system_prompt.empty()) {
            system_tokens = ::llama_tokenize(ctx, system_prompt, true);

            llama_batch_clear(batch);

            for (int i = 0; i < (int)system_tokens.size(); ++i)
            {
                llama_batch_add(batch, system_tokens[i], i, { 0 }, false);
            }

            for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += params.n_batch)
            {
                const int32_t n_tokens = std::min(params.n_batch, (int32_t) (batch.n_tokens - i));
                llama_batch batch_view = {
                    n_tokens,
                    batch.token    + i,
                    nullptr,
                    batch.pos      + i,
                    batch.n_seq_id + i,
                    batch.seq_id   + i,
                    batch.logits   + i,
                    0, 0, 0, // unused
                };
                if (llama_decode(ctx, batch_view) != 0)
                {
                    LOG_TEE("%s: llama_decode() failed\n", __func__);
                    return;
                }
            }

            // assign the system KV cache to all parallel sequences
            for (int32_t i = 1; i < params.n_parallel; ++i)
            {
                llama_kv_cache_seq_cp(ctx, 0, i, 0, system_tokens.size());
            }
        }

        LOG_TEE("system prompt updated\n");
        system_need_update = false;
    }
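
    // e.g. with params.n_parallel = 4: sequence 0 is decoded with the system prompt above,
    // then its KV cache is copied to sequences 1..3 so every slot shares the same prefix.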
    void system_prompt_notify() {
        // release all slots
        for (server_slot &slot : slots)
        {
            slot.release();
        }

        system_need_update = true;
    }
    static size_t find_stopping_strings(const std::string &text, const size_t last_token_size,
                                        const stop_type type, server_slot &slot)
    {
        size_t stop_pos = std::string::npos;

        for (const std::string &word : slot.params.antiprompt)
        {
            size_t pos;
            if (type == STOP_FULL)
            {
                const size_t tmp      = word.size() + last_token_size;
                const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0;
                pos = text.find(word, from_pos);
            }
            else
            {
                pos = find_partial_stop_string(word, text);
            }
            if (pos != std::string::npos &&
                (stop_pos == std::string::npos || pos < stop_pos))
            {
                if (type == STOP_FULL)
                {
                    slot.stopped_word   = true;
                    slot.stopping_word  = word;
                    slot.has_next_token = false;
                }
                stop_pos = pos;
            }
        }

        return stop_pos;
    }
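
    // Illustrative example: with antiprompt "</s>" and generated text ending in "...</s",
    // STOP_PARTIAL reports the position of the partial match (so the fragment is withheld
    // from the stream), while STOP_FULL fires only once the whole "</s>" is present and
    // then marks the slot as stopped_word.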
    bool process_token(completion_token_output &result, server_slot &slot) {
        // remember which tokens were sampled - used for repetition penalties during sampling
        const std::string token_str = llama_token_to_piece(ctx, result.tok);
        slot.sampled = result.tok;

        // accumulate the generated text; stop words are searched for and trimmed below
        slot.generated_text += token_str;
        slot.has_next_token = true;

        if (slot.ctx_sampling->params.use_penalty_prompt_tokens && result.tok != -1)
        {
            // we can change penalty_prompt_tokens because it is always created from scratch each request
            slot.ctx_sampling->params.penalty_prompt_tokens.push_back(result.tok);
        }

        // check if there is an incomplete UTF-8 character at the end
        bool incomplete = false;
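        // Illustrative example: "€" is encoded as E2 82 AC; if only E2 82 has arrived, the
        // scan below finds the lead byte E2 (1110xxxx => a 3-byte sequence) at i = 2 and
        // flags the text as incomplete so the partial character is not streamed out.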
        for (unsigned i = 1; i < 5 && i <= slot.generated_text.size(); ++i)
        {
            unsigned char c = slot.generated_text[slot.generated_text.size() - i];
            if ((c & 0xC0) == 0x80)
            {
                // continuation byte: 10xxxxxx
                continue;
            }
            if ((c & 0xE0) == 0xC0)
            {
                // 2-byte character: 110xxxxx ...
                incomplete = i < 2;
            }
            else if ((c & 0xF0) == 0xE0)
            {
                // 3-byte character: 1110xxxx ...
                incomplete = i < 3;
            }
            else if ((c & 0xF8) == 0xF0)
            {
                // 4-byte character: 11110xxx ...
                incomplete = i < 4;
            }
            // else 1-byte character or invalid byte
            break;
        }

        if (!incomplete)
        {
            size_t pos = std::min(slot.n_sent_text, slot.generated_text.size());
            const std::string str_test = slot.generated_text.substr(pos);
            bool is_stop_full = false;
            size_t stop_pos = find_stopping_strings(str_test, token_str.size(), STOP_FULL, slot);
            if (stop_pos != std::string::npos)
            {
                is_stop_full = true;
                slot.generated_text.erase(
                    slot.generated_text.begin() + pos + stop_pos,
                    slot.generated_text.end());
                pos = std::min(slot.n_sent_text, slot.generated_text.size());
            }
            else
            {
                is_stop_full = false;
                stop_pos = find_stopping_strings(str_test, token_str.size(), STOP_PARTIAL, slot);
            }

            // check if there is any token to predict
            if (stop_pos == std::string::npos || (!slot.has_next_token && !is_stop_full && stop_pos > 0))
            {
                // do not send the stop word in the response
                result.text_to_send = slot.generated_text.substr(pos, std::string::npos);
                slot.n_sent_text += result.text_to_send.size();
                // add the token to slot queue and cache
            }

            if (slot.params.stream)
            {
                send_partial_response(slot, result);
            }
        }

        slot.add_token_string(result);

        if (incomplete)
        {
            slot.has_next_token = true;
        }

        // check the limits
        if (slot.n_decoded > 0 && slot.has_next_token && !slot.has_budget(params))
        {
            slot.stopped_limit = true;
            slot.has_next_token = false;
        }

        if (!slot.cache_tokens.empty() && llama_token_is_eog(model, result.tok))
        {
            slot.stopped_eos = true;
            slot.has_next_token = false;
            LOG_VERBOSE("eos token found", {});
        }

        LOG_VERBOSE("next token", {
            {"token",                result.tok},
            {"token_text",           tokens_to_output_formatted_string(ctx, result.tok)},
            {"has_next_token",       slot.has_next_token},
            {"n_remain",             slot.n_remaining},
            {"num_tokens_predicted", slot.n_decoded},
            {"stopped_eos",          slot.stopped_eos},
            {"stopped_word",         slot.stopped_word},
            {"stopped_limit",        slot.stopped_limit},
            {"stopping_word",        slot.stopping_word},
        });

        return slot.has_next_token; // continue
    }
    bool process_images(server_slot &slot) const
    {
        for (slot_image &img : slot.images)
        {
            if (!img.request_encode_image)
            {
                continue;
            }

            if (!llava_image_embed_make_with_clip_img(clp_ctx, params.n_threads, img.img_data, &img.image_embedding, &img.image_tokens)) {
                LOG_TEE("Error processing the given image\n");
                return false;
            }

            img.request_encode_image = false;
        }

        return slot.images.size() > 0;
    }

    void send_error(task_server& task, const std::string &error)
    {
        LOG_TEE("task %i - error: %s\n", task.id, error.c_str());
        task_result res;
        res.id           = task.id;
        res.multitask_id = task.multitask_id;
        res.stop         = false;
        res.error        = true;
        res.result_json  = { { "content", error } };
        queue_results.send(res);
    }
    json get_formated_generation(server_slot &slot)
    {
        const auto eos_bias = slot.sparams.logit_bias.find(llama_token_eos(model));
        const bool ignore_eos = eos_bias != slot.sparams.logit_bias.end() &&
                                eos_bias->second < 0.0f && std::isinf(eos_bias->second);
        std::vector<std::string> samplers_sequence;
        for (const auto &sampler_type : slot.sparams.samplers_sequence)
        {
            samplers_sequence.emplace_back(llama_sampling_type_to_str(sampler_type));
        }

        return json {
            {"n_ctx",                     slot.n_ctx},
            {"n_predict",                 slot.n_predict},
            {"model",                     params.model_alias},
            {"seed",                      slot.params.seed},
            {"temperature",               slot.sparams.temp},
            {"dynatemp_range",            slot.sparams.dynatemp_range},
            {"dynatemp_exponent",         slot.sparams.dynatemp_exponent},
            {"top_k",                     slot.sparams.top_k},
            {"top_p",                     slot.sparams.top_p},
            {"min_p",                     slot.sparams.min_p},
            {"tfs_z",                     slot.sparams.tfs_z},
            {"typical_p",                 slot.sparams.typical_p},
            {"repeat_last_n",             slot.sparams.penalty_last_n},
            {"repeat_penalty",            slot.sparams.penalty_repeat},
            {"presence_penalty",          slot.sparams.penalty_present},
            {"frequency_penalty",         slot.sparams.penalty_freq},
            {"penalty_prompt_tokens",     slot.sparams.penalty_prompt_tokens},
            {"use_penalty_prompt_tokens", slot.sparams.use_penalty_prompt_tokens},
            {"mirostat",                  slot.sparams.mirostat},
            {"mirostat_tau",              slot.sparams.mirostat_tau},
            {"mirostat_eta",              slot.sparams.mirostat_eta},
            {"penalize_nl",               slot.sparams.penalize_nl},
            {"stop",                      slot.params.antiprompt},
            {"n_predict",                 slot.params.n_predict}, // NOTE: "n_predict" also appears above with slot.n_predict
            {"n_keep",                    params.n_keep},
            {"ignore_eos",                ignore_eos},
            {"stream",                    slot.params.stream},
            {"logit_bias",                slot.sparams.logit_bias},
            {"n_probs",                   slot.sparams.n_probs},
            {"min_keep",                  slot.sparams.min_keep},
            {"grammar",                   slot.sparams.grammar},
            {"samplers",                  samplers_sequence}
        };
    }
    void send_partial_response(server_slot &slot, completion_token_output tkn)
    {
        task_result res;
        res.id           = slot.task_id;
        res.multitask_id = slot.multitask_id;
        res.error        = false;
        res.stop         = false;
        res.result_json  = json
        {
            {"stop",       false},
            {"slot_id",    slot.id},
            {"multimodal", multimodal}
        };
        if (!llama_token_is_eog(model, tkn.tok)) {
            res.result_json["content"] = tkn.text_to_send;
        }

        if (slot.sparams.n_probs > 0)
        {
            std::vector<completion_token_output> probs_output = {};
            const std::vector<llama_token> to_send_toks = llama_tokenize(ctx, tkn.text_to_send, false);
            size_t probs_pos      = std::min(slot.n_sent_token_probs,                       slot.generated_token_probs.size());
            size_t probs_stop_pos = std::min(slot.n_sent_token_probs + to_send_toks.size(), slot.generated_token_probs.size());
            if (probs_pos < probs_stop_pos)
            {
                probs_output = std::vector<completion_token_output>(slot.generated_token_probs.begin() + probs_pos, slot.generated_token_probs.begin() + probs_stop_pos);
            }
            slot.n_sent_token_probs = probs_stop_pos;
            res.result_json["completion_probabilities"] = probs_vector_to_json(ctx, probs_output);
        }

        queue_results.send(res);
    }
    void send_final_response(server_slot &slot)
    {
        task_result res;
        res.id           = slot.task_id;
        res.multitask_id = slot.multitask_id;
        res.error        = false;
        res.stop         = true;
        res.result_json  = json
        {
            {"content",          !slot.params.stream ? slot.generated_text : ""},
            {"slot_id",          slot.id},
            {"stop",             true},
            {"model",            params.model_alias},
            {"tokens_predicted", slot.n_decoded},
            {"tokens_evaluated", slot.n_prompt_tokens},
            {"truncated",        slot.truncated},
            {"stopped_eos",      slot.stopped_eos},
            {"stopped_word",     slot.stopped_word},
            {"stopped_limit",    slot.stopped_limit},
            {"stopping_word",    slot.stopping_word},
            {"tokens_cached",    slot.n_past},
            {"timings",          slot.get_formated_timings()}
        };

        if (slot.sparams.n_probs > 0)
        {
            std::vector<completion_token_output> probs = {};
            if (!slot.params.stream && slot.stopped_word)
            {
                const std::vector<llama_token> stop_word_toks = llama_tokenize(ctx, slot.stopping_word, false);
                probs = std::vector<completion_token_output>(slot.generated_token_probs.begin(), slot.generated_token_probs.end() - stop_word_toks.size());
            }
            else
            {
                probs = std::vector<completion_token_output>(
                    slot.generated_token_probs.begin(),
                    slot.generated_token_probs.end());
            }
            res.result_json["completion_probabilities"] = probs_vector_to_json(ctx, probs);
        }

        queue_results.send(res);
    }
    void send_embedding(server_slot & slot, const llama_batch & batch)
    {
        task_result res;
        res.id           = slot.task_id;
        res.multitask_id = slot.multitask_id;
        res.error        = false;
        res.stop         = true;

        const int n_embd = llama_n_embd(model);

        if (!params.embedding)
        {
            LOG_WARNING("embedding disabled", {{"params.embedding", params.embedding}});
            res.result_json = json
            {
                {"embedding", std::vector<float>(n_embd, 0.0f)},
            };
        }
        else
        {
            for (int i = 0; i < batch.n_tokens; ++i) {
                if (!batch.logits[i] || batch.seq_id[i][0] != slot.id) {
                    continue;
                }

                const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
                if (embd == NULL) {
                    embd = llama_get_embeddings_ith(ctx, i);
                    if (embd == NULL) {
                        LOG_ERROR("failed to get embeddings for token", {{"token", batch.token[i]}, {"seq_id", batch.seq_id[i][0]}});
                        res.result_json = json
                        {
                            {"embedding", std::vector<float>(n_embd, 0.0f)},
                        };
                        continue;
                    }
                }

                res.result_json = json
                {
                    {"embedding", std::vector<float>(embd, embd + n_embd)},
                    {"timings",   slot.get_formated_timings()},
                };
            }
        }

        queue_results.send(res);
    }
    void request_completion(int task_id, json data, bool embedding, int multitask_id)
    {
        task_server task;
        task.id             = task_id;
        task.target_id      = 0;
        task.data           = std::move(data);
        task.embedding_mode = embedding;
        task.type           = TASK_TYPE_COMPLETION;
        task.multitask_id   = multitask_id;

        // when a completion task's prompt array is not a singleton, we split it into multiple requests
        // otherwise, it's a single-prompt task and we queue it directly
        // if there are numbers in the prompt array, it is treated as an array of tokens
        if (task.data.count("prompt") != 0 && task.data.at("prompt").size() > 1) {
            bool numbers = false;
            for (const auto& e : task.data.at("prompt")) {
                if (e.is_number()) {
                    numbers = true;
                    break;
                }
            }

            // NOTE: split_multiprompt_task() does not handle a mix of strings and numbers,
            // it will completely stall the server. I don't know where the bug for this is.
            //
            // if there are numbers, it needs to be treated like a single prompt,
            // queue_tasks handles a mix of strings and numbers just fine.
            if (numbers) {
                queue_tasks.post(task);
            } else {
                split_multiprompt_task(task_id, task);
            }
        } else {
            // an empty prompt can make a slot misbehave
            if (task.data.contains("prompt") && task.data["prompt"].is_string() && task.data["prompt"].get<std::string>().empty()) {
                task.data["prompt"] = " "; // add a space so that we have at least one token
            }
            queue_tasks.post(task);
        }
    }
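
    // Illustrative example: {"prompt": ["a", "b"]} is split into two subtasks, one per
    // string, tracked under a single multitask id; {"prompt": [1, 2, 3]} contains numbers
    // and is queued as one task whose prompt is an array of token ids.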
    // for processing multiple images
    bool ingest_images(server_slot &slot, int n_batch)
    {
        int image_idx = 0;

        while (image_idx < (int) slot.images.size())
        {
            slot_image &img = slot.images[image_idx];

            // process prefix prompt
            for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch)
            {
                const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i));
                llama_batch batch_view = {
                    n_tokens,
                    batch.token    + i,
                    nullptr,
                    batch.pos      + i,
                    batch.n_seq_id + i,
                    batch.seq_id   + i,
                    batch.logits   + i,
                    0, 0, 0, // unused
                };
                if (llama_decode(ctx, batch_view))
                {
                    LOG_TEE("%s : failed to eval\n", __func__);
                    return false;
                }
            }

            // process image with llm
            for (int i = 0; i < img.image_tokens; i += n_batch)
            {
                int n_eval = img.image_tokens - i;
                if (n_eval > n_batch)
                {
                    n_eval = n_batch;
                }

                const int n_embd = llama_n_embd(model);
                llama_batch batch_img = {
                    n_eval,
                    nullptr,
                    (img.image_embedding + i * n_embd),
                    nullptr,
                    nullptr,
                    nullptr,
                    nullptr,
                    slot.n_past,
                    1, 0
                };
                if (llama_decode(ctx, batch_img))
                {
                    LOG_TEE("%s : failed to eval image\n", __func__);
                    return false;
                }
                slot.n_past += n_eval;
            }
            image_idx++;

            llama_batch_clear(batch);

            // append the prefix of the next image, or the suffix prompt if there are no more images
            const auto json_prompt = (image_idx >= (int) slot.images.size()) ?
                slot.params.input_suffix : // no more images, process the suffix prompt
                (json)(slot.images[image_idx].prefix_prompt);

            std::vector<llama_token> append_tokens = tokenize(json_prompt, false);
            for (int i = 0; i < (int) append_tokens.size(); ++i)
            {
                llama_batch_add(batch, append_tokens[i], system_tokens.size() + slot.n_past, { slot.id }, true);
                slot.n_past += 1;
            }
        }

        return true;
    }
    void request_cancel(int task_id)
    {
        task_server task;
        task.type      = TASK_TYPE_CANCEL;
        task.target_id = task_id;
        queue_tasks.post(task);
    }

    void split_multiprompt_task(int multitask_id, task_server& multiprompt_task)
    {
        int prompt_count = multiprompt_task.data.at("prompt").size();
        if (prompt_count <= 1) {
            send_error(multiprompt_task, "error while handling multiple prompts");
            return;
        }

        // generate an ID for each subtask
        std::vector<int> subtask_ids(prompt_count);
        for (int i = 0; i < prompt_count; i++)
        {
            subtask_ids[i] = queue_tasks.get_new_id();
        }

        // queue up the multitask so we can track its subtask progression
        queue_tasks.add_multitask(multitask_id, subtask_ids);

        // add subtasks
        for (int i = 0; i < prompt_count; i++)
        {
            json subtask_data = multiprompt_task.data;
            subtask_data["prompt"] = subtask_data["prompt"][i];

            // subtasks inherit everything else (embedding mode, etc.)
            request_completion(subtask_ids[i], subtask_data, multiprompt_task.embedding_mode, multitask_id);
        }
    }
    std::string common_prefix(const std::string& str1, const std::string& str2) {
        // clamp the compared range so that str1 being longer than str2 stays in bounds
        const size_t n = std::min(str1.size(), str2.size());
        auto mismatch_pair = std::mismatch(str1.begin(), str1.begin() + n, str2.begin());
        return std::string(str1.begin(), mismatch_pair.first);
    }
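
    // e.g. common_prefix("Hello world", "Hello there") == "Hello " (the shared leading characters)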
    // Find the slot that has the greatest common prefix
    server_slot *prefix_slot(const json &prompt) {
        if (!prompt.is_string()) {
            return nullptr;
        }

        std::string prompt_str = prompt.get<std::string>();
        server_slot *slot = nullptr;
        size_t longest = 0;

        for (server_slot &s : slots) {
            if (s.available() && s.prompt.is_string()) {
                std::string s_prompt = s.prompt.get<std::string>();
                std::string prefix = common_prefix(s_prompt, prompt_str);

                if (prefix.size() > longest) {
                    slot = &s;
                    longest = prefix.size();
                }
            }
        }

        if (!slot) {
            return get_slot(-1);
        }

        LOG_DEBUG("slot with common prefix found", {
            {"slot_id",    slot->id},
            {"characters", longest}
        });
        return slot;
    }
    void process_single_task(task_server& task)
    {
        switch (task.type)
        {
            case TASK_TYPE_COMPLETION: {
                server_slot *slot = prefix_slot(task.data["prompt"]);
                if (slot == nullptr)
                {
                    // if no slot is available, we defer this task for processing later
                    LOG_VERBOSE("no slot is available", {{"task_id", task.id}});
                    queue_tasks.defer(task);
                    break;
                }

                slot->reset();

                slot->embedding    = task.embedding_mode;
                slot->task_id      = task.id;
                slot->multitask_id = task.multitask_id;

                if (!launch_slot_with_data(slot, task.data))
                {
                    // send error result
                    send_error(task, "internal_error");
                    break;
                }
            } break;
            case TASK_TYPE_CANCEL: { // release the slot linked with the task id
                for (auto & slot : slots)
                {
                    if (slot.task_id == task.target_id)
                    {
                        slot.release();
                        break;
                    }
                }
            } break;
            case TASK_TYPE_NEXT_RESPONSE: {
                // do nothing
            } break;
            case TASK_TYPE_METRICS: {
                json slots_data = json::array();
                int n_idle_slots       = 0;
                int n_processing_slots = 0;

                for (server_slot &slot: slots) {
                    json slot_data = get_formated_generation(slot);
                    slot_data["id"]         = slot.id;
                    slot_data["task_id"]    = slot.task_id;
                    slot_data["state"]      = slot.state;
                    slot_data["prompt"]     = slot.prompt;
                    slot_data["next_token"] = {
                        {"has_next_token",       slot.has_next_token},
                        {"n_remain",             slot.n_remaining},
                        {"num_tokens_predicted", slot.n_decoded},
                        {"stopped_eos",          slot.stopped_eos},
                        {"stopped_word",         slot.stopped_word},
                        {"stopped_limit",        slot.stopped_limit},
                        {"stopping_word",        slot.stopping_word},
                    };
                    if (slot_data["state"] == IDLE) {
                        n_idle_slots++;
                    } else {
                        n_processing_slots++;
                    }
                    slots_data.push_back(slot_data);
                }
                LOG_DEBUG("slot data", {
                    {"task_id",            task.id},
                    {"n_idle_slots",       n_idle_slots},
                    {"n_processing_slots", n_processing_slots}
                });
                LOG_VERBOSE("slot data", {
                    {"task_id",            task.id},
                    {"n_idle_slots",       n_idle_slots},
                    {"n_processing_slots", n_processing_slots},
                    {"slots",              slots_data}
                });
                task_result res;
                res.id           = task.id;
                res.multitask_id = task.multitask_id;
                res.stop         = true;
                res.error        = false;
                res.result_json  = {
                    { "idle",                            n_idle_slots },
                    { "processing",                      n_processing_slots },
                    { "deferred",                        queue_tasks.queue_tasks_deferred.size() },

                    { "n_prompt_tokens_processed_total", metrics.n_prompt_tokens_processed_total},
                    { "n_tokens_predicted_total",        metrics.n_tokens_predicted_total},

                    { "n_prompt_tokens_processed",       metrics.n_prompt_tokens_processed},
                    { "t_prompt_processing",             metrics.t_prompt_processing},
                    { "n_tokens_predicted",              metrics.n_tokens_predicted},
                    { "t_tokens_generation",             metrics.t_tokens_generation},

                    { "kv_cache_tokens_count",           llama_get_kv_cache_token_count(ctx)},
                    { "kv_cache_used_cells",             llama_get_kv_cache_used_cells(ctx)},

                    { "slots",                           slots_data },
                };
                metrics.reset_bucket();
                queue_results.send(res);
            } break;
        }
    }
  1325. void on_finish_multitask(task_multi& multitask)
  1326. {
  1327. // all subtasks done == multitask is done
  1328. task_result result;
  1329. result.id = multitask.id;
  1330. result.stop = true;
  1331. result.error = false;
  1332. // collect json results into one json result
  1333. std::vector<json> result_jsons;
  1334. for (auto& subres : multitask.results)
  1335. {
  1336. result_jsons.push_back(subres.result_json);
  1337. result.error = result.error && subres.error;
  1338. }
  1339. result.result_json = json{ { "results", result_jsons } };
  1340. queue_results.send(result);
  1341. }
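
    // one scheduler pass, roughly:
    //   1. apply a pending system prompt update
    //   2. context-shift any running slot that has filled its window (unless self-extend is on)
    //   3. append each running slot's last sampled token to the shared batch
    //   4. ingest queued prompts, reusing cached prefixes (continuous batching permitting)
    //   5. llama_decode the batch in n_batch chunks and sample one new token per slot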
    bool update_slots() {
        if (system_need_update)
        {
            LOG_DEBUG("updating system prompt", {});
            system_prompt_update();
        }

        llama_batch_clear(batch);

        if (all_slots_are_idle)
        {
            if (system_prompt.empty() && clean_kv_cache)
            {
                LOG_DEBUG("all slots are idle and system prompt is empty, clear the KV cache", {});
                kv_cache_clear();
            }
            return true;
        }

        LOG_VERBOSE("posting NEXT_RESPONSE", {});
        task_server task;
        task.type = TASK_TYPE_NEXT_RESPONSE;
        task.target_id = -1;
        queue_tasks.post(task);

        for (server_slot &slot : slots)
        {
            if (slot.ga_n == 1)
            {
                if (slot.is_processing() && system_tokens.size() + slot.cache_tokens.size() >= (size_t) slot.n_ctx)
                {
                    // Shift context
                    const int n_keep    = slot.params.n_keep + add_bos_token;
                    const int n_left    = (int) system_tokens.size() + slot.n_past - n_keep;
                    const int n_discard = n_left / 2;
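
                    // sliding-window shift: keep the first n_keep tokens, drop the oldest
                    // half of what remains, and slide the rest left so generation can go on;
                    // e.g. 4096 tokens with n_keep = 256 gives n_left = 3840, n_discard = 1920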

                    LOG_DEBUG("slot context shift", {
                        {"slot_id",         slot.id},
                        {"task_id",         slot.task_id},
                        {"n_keep",          n_keep},
                        {"n_left",          n_left},
                        {"n_discard",       n_discard},
                        {"n_ctx",           n_ctx},
                        {"n_past",          slot.n_past},
                        {"n_system_tokens", system_tokens.size()},
                        {"n_cache_tokens",  slot.cache_tokens.size()}
                    });

                    llama_kv_cache_seq_rm (ctx, slot.id, n_keep            , n_keep + n_discard);
                    llama_kv_cache_seq_add(ctx, slot.id, n_keep + n_discard, system_tokens.size() + slot.n_past, -n_discard);

                    for (size_t i = n_keep + n_discard; i < slot.cache_tokens.size(); i++)
                    {
                        slot.cache_tokens[i - n_discard] = slot.cache_tokens[i];
                    }

                    slot.cache_tokens.resize(slot.cache_tokens.size() - n_discard);

                    slot.n_past -= n_discard;

                    slot.truncated = true;
                }
            }
        }

        // decode any currently ongoing sequences
        LOG_VERBOSE("decoding ongoing sequences", {});
        for (auto & slot : slots)
        {
            // release the slot
            if (slot.command == RELEASE)
            {
                slot.state = IDLE;
                slot.command = NONE;
                slot.t_last_used = ggml_time_us();

                LOG_DEBUG("slot released", {
                    {"slot_id",         slot.id},
                    {"task_id",         slot.task_id},
                    {"n_ctx",           n_ctx},
                    {"n_past",          slot.n_past},
                    {"n_system_tokens", system_tokens.size()},
                    {"n_cache_tokens",  slot.cache_tokens.size()},
                    {"truncated",       slot.truncated}
                });
                queue_tasks.notify_slot_changed();

                continue;
            }

            if (slot.state == IDLE)
            {
                continue;
            }

            slot.i_batch = batch.n_tokens;

            const int32_t slot_npast = slot.n_past_se > 0 ? slot.n_past_se : slot.n_past;

            // TODO: we always have to take into account the "system_tokens"
            //       this is not great and needs to be improved somehow
            llama_batch_add(batch, slot.sampled, system_tokens.size() + slot_npast, { slot.id }, true);
            slot.n_past += 1;
        }

        // process in chunks of params.n_batch
        int32_t n_batch = params.n_batch;

        // assign workload to the slots
        if (params.cont_batching || batch.n_tokens == 0)
        {
            for (auto & slot : slots)
            {
                const bool has_prompt = slot.prompt.is_array() || (slot.prompt.is_string() && !slot.prompt.get<std::string>().empty()) || !slot.images.empty();

                // empty prompt passed -> release the slot and send empty response
                if (slot.state == IDLE && slot.command == LOAD_PROMPT && !has_prompt)
                {
                    slot.release();
                    slot.print_timings();
                    send_final_response(slot);
                    continue;
                }

                // need to process the prompt
                if (slot.state == IDLE && slot.command == LOAD_PROMPT)
                {
                    slot.state = PROCESSING;
                    slot.command = NONE;
                    std::vector<llama_token> prompt_tokens;
                    slot.t_start_process_prompt = ggml_time_us();
                    slot.t_start_genereration = 0;

                    prompt_tokens = tokenize(slot.prompt, system_prompt.empty());  // add BOS if there isn't system prompt

                    slot.n_prompt_tokens = prompt_tokens.size();

                    if (slot.params.n_keep < 0)
                    {
                        slot.params.n_keep = slot.n_prompt_tokens;
                    }
                    slot.params.n_keep = std::min(slot.n_ctx - 4, slot.params.n_keep);

                    // if input prompt is too big, truncate it, if group attention self-extend is disabled
                    if (slot.ga_n == 1 && slot.n_prompt_tokens >= slot.n_ctx)
                    {
                        const int n_left  = slot.n_ctx - slot.params.n_keep;
                        const int n_shift = n_left / 2;
                        const int n_erase = slot.n_prompt_tokens - slot.params.n_keep - n_shift;
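
                        // keep the first n_keep tokens and the newest n_shift tokens,
                        // erasing the middle; e.g. n_ctx = 2048, n_keep = 128 gives
                        // n_left = 1920, n_shift = 960, n_erase = n_prompt_tokens - 1088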

                        std::vector<llama_token> new_tokens(
                            prompt_tokens.begin(),
                            prompt_tokens.begin() + slot.params.n_keep);
                        new_tokens.insert(
                            new_tokens.end(),
                            prompt_tokens.begin() + slot.params.n_keep + n_erase,
                            prompt_tokens.end());

                        LOG_INFO("input truncated", {
                            {"n_ctx",   slot.n_ctx},
                            {"n_keep",  slot.params.n_keep},
                            {"n_left",  n_left},
                            {"n_shift", n_shift},
                            {"n_erase", n_erase},
                        });
                        slot.truncated = true;
                        prompt_tokens = new_tokens;

                        slot.n_prompt_tokens = prompt_tokens.size();
                        GGML_ASSERT(slot.n_prompt_tokens < slot.n_ctx);
                    }

                    if (!slot.params.cache_prompt)
                    {
                        llama_sampling_reset(slot.ctx_sampling);

                        slot.n_past    = 0;
                        slot.n_past_se = 0;
                        slot.ga_i      = 0;
                        slot.n_prompt_tokens_processed = slot.n_prompt_tokens;
                    }
                    else
                    {
                        // push the prompt into the sampling context (do not apply grammar)
                        for (auto &token : prompt_tokens)
                        {
                            llama_sampling_accept(slot.ctx_sampling, ctx, token, false);
                        }

                        slot.n_past = common_part(slot.cache_tokens, prompt_tokens);

                        // the last token of the cache is not in the KV cache until the next call to llama_decode
                        // (it was sampled, pushed into the "cache_tokens", but not yet put in the context)
                        if (slot.n_past > 0 && slot.n_past == (int32_t) slot.cache_tokens.size())
                        {
                            slot.n_past -= 1;
                        }

                        slot.n_prompt_tokens_processed = slot.n_prompt_tokens;

                        if (slot.ga_n != 1)
                        {
                            int ga_i = 0;
                            int32_t ga_n = slot.ga_n;
                            int32_t ga_w = slot.ga_w;
                            int32_t slot_npast = 0;
                            for (int k = 0; k < slot.n_past; ++k)
                            {
                                while (slot_npast >= ga_i + ga_w) {
                                    const int bd = (ga_w/ga_n)*(ga_n - 1);
                                    slot_npast -= bd;
                                    ga_i += ga_w/ga_n;
                                }
                                slot_npast++;
                            }
                            slot.n_past_se = slot_npast;
                            slot.ga_i = ga_i;
                        }

                        LOG_DEBUG("slot progression", {
                            { "slot_id",   slot.id },
                            { "task_id",   slot.task_id },
                            { "n_past",    slot.n_past },
                            { "n_past_se", slot.n_past_se },
                            { "ga_i",      slot.ga_i },
                            { "n_prompt_tokens_processed", slot.n_prompt_tokens_processed }
                        });
                    }

                    slot.cache_tokens = prompt_tokens;

                    if (slot.n_past == slot.n_prompt_tokens && slot.n_past > 0)
                    {
                        // we have to evaluate at least 1 token to generate logits.
                        LOG_DEBUG("we have to evaluate at least 1 token to generate logits", {
                            { "slot_id", slot.id },
                            { "task_id", slot.task_id }
                        });
                        slot.n_past--;
                        if (slot.ga_i > 0)
                        {
                            slot.n_past_se--;
                        }
                    }

                    int p0 = (int) system_tokens.size() + slot.n_past;
                    LOG_DEBUG("kv cache rm [p0, end)", {
                        { "slot_id", slot.id },
                        { "task_id", slot.task_id },
                        { "p0",      p0 }
                    });
                    llama_kv_cache_seq_rm(ctx, slot.id, p0, -1);

                    LOG_VERBOSE("prompt ingested", {
                        {"n_past",  slot.n_past},
                        {"cached",  tokens_to_str(ctx, slot.cache_tokens.cbegin(), slot.cache_tokens.cbegin() + slot.n_past)},
                        {"to_eval", tokens_to_str(ctx, slot.cache_tokens.cbegin() + slot.n_past, slot.cache_tokens.cend())},
                    });

                    const bool has_images = process_images(slot);

                    // process the prefix of first image
                    std::vector<llama_token> prefix_tokens = has_images ? tokenize(slot.images[0].prefix_prompt, add_bos_token) : prompt_tokens;

                    int32_t slot_npast = slot.n_past_se > 0 ? slot.n_past_se : slot.n_past;

                    int32_t ga_i = slot.ga_i;
                    int32_t ga_n = slot.ga_n;
                    int32_t ga_w = slot.ga_w;

                    for (; slot.n_past < (int) prefix_tokens.size(); ++slot.n_past)
                    {
                        if (slot.ga_n != 1)
                        {
                            while (slot_npast >= ga_i + ga_w) {
                                const int bd = (ga_w/ga_n)*(ga_n - 1);
                                slot_npast -= bd;
                                ga_i += ga_w/ga_n;
                            }
                        }
                        llama_batch_add(batch, prefix_tokens[slot.n_past], system_tokens.size() + slot_npast, { slot.id }, false);
                        slot_npast++;
                    }

                    if (has_images && !ingest_images(slot, n_batch))
                    {
                        LOG_ERROR("failed processing images", {
                            {"slot_id", slot.id},
                            {"task_id", slot.task_id},
                        });
                        // FIXME @phymbert: to be properly tested
                        //  early returning without changing the slot state will block the slot forever
                        //  no one at the moment is checking the return value
                        return false;
                    }

                    // extract the logits only for the last token
                    if (batch.n_tokens > 0)
                    {
                        batch.logits[batch.n_tokens - 1] = true;
                    }

                    slot.n_decoded = 0;
                    slot.i_batch   = batch.n_tokens - 1;
                }
            }
        }

        if (batch.n_tokens == 0)
        {
            all_slots_are_idle = true;
            return true;
        }

        for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch)
        {
            const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i);
            for (auto & slot : slots)
            {
                if (slot.ga_n != 1)
                {
                    // context extension via Self-Extend
                    while (slot.n_past_se >= slot.ga_i + slot.ga_w)
                    {
                        const int ib = (slot.ga_n * slot.ga_i) / slot.ga_w;
                        const int bd = (slot.ga_w / slot.ga_n) * (slot.ga_n - 1);
                        const int dd = (slot.ga_w / slot.ga_n) - ib * bd - slot.ga_w;
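
                        // Self-Extend (grouped attention) remaps KV cache positions so the
                        // model never attends past its trained context: the window
                        // [ga_i, ga_i + ga_w) is divided by ga_n, the surrounding spans are
                        // shifted to keep positions contiguous, and n_past_se shrinks by bd
                        // on each iteration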

                        LOG_TEE("\n");
                        LOG_TEE("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i, slot.n_past_se, ib * bd, slot.ga_i + ib * bd, slot.n_past_se + ib * bd);
                        LOG_TEE("div:   [%6d, %6d] / %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w, slot.ga_n, (slot.ga_i + ib * bd) / slot.ga_n, (slot.ga_i + ib * bd + slot.ga_w) / slot.ga_n);
                        LOG_TEE("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd + slot.ga_w, slot.n_past_se + ib * bd, dd, slot.ga_i + ib * bd + slot.ga_w + dd, slot.n_past_se + ib * bd + dd);

                        llama_kv_cache_seq_add(ctx, slot.id, slot.ga_i, slot.n_past_se, ib * bd);
                        llama_kv_cache_seq_div(ctx, slot.id, slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w, slot.ga_n);
                        llama_kv_cache_seq_add(ctx, slot.id, slot.ga_i + ib * bd + slot.ga_w, slot.n_past_se + ib * bd, dd);

                        slot.n_past_se -= bd;

                        slot.ga_i += slot.ga_w / slot.ga_n;

                        LOG_TEE("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", slot.n_past_se + bd, slot.n_past_se, slot.ga_i);
                    }
                    slot.n_past_se += n_tokens;
                }
            }

            llama_batch batch_view =
            {
                n_tokens,
                batch.token    + i,
                nullptr,
                batch.pos      + i,
                batch.n_seq_id + i,
                batch.seq_id   + i,
                batch.logits   + i,
                0, 0, 0, // unused
            };

            const int ret = llama_decode(ctx, batch_view);
            if (ret != 0)
            {
                if (n_batch == 1 || ret < 0)
                {
                    // if you get here, it means the KV cache is full - try increasing it via the context size
                    LOG_TEE("%s : failed to decode the batch, n_batch = %d, ret = %d\n", __func__, n_batch, ret);
                    return false;
                }

                LOG_TEE("%s : failed to find free space in the KV cache, retrying with smaller n_batch = %d\n", __func__, n_batch / 2);

                // retry with half the batch size to try to find a free slot in the KV cache
                n_batch /= 2;
                i -= n_batch;
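                // the loop increment adds n_batch back, so this retries the same
                // offset i with the halved batch size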
                continue;
            }

            for (auto & slot : slots)
            {
                if (slot.i_batch < (int) i || slot.i_batch >= (int) (i + n_tokens))
                {
                    continue;
                }

                // prompt evaluated for embedding
                if (slot.embedding)
                {
                    send_embedding(slot, batch_view);
                    slot.release();
                    slot.i_batch = -1;
                    continue;
                }

                completion_token_output result;
                const llama_token id = llama_sampling_sample(slot.ctx_sampling, ctx, NULL, slot.i_batch - i);

                llama_sampling_accept(slot.ctx_sampling, ctx, id, true);

                slot.n_decoded += 1;
                if (slot.n_decoded == 1)
                {
                    slot.t_start_genereration = ggml_time_us();
                    slot.t_prompt_processing = (slot.t_start_genereration - slot.t_start_process_prompt) / 1e3;
                    metrics.on_prompt_eval(slot);
                }

                llama_token_data_array cur_p = { slot.ctx_sampling->cur.data(), slot.ctx_sampling->cur.size(), false };
                result.tok = id;

                const int32_t n_probs = slot.sparams.n_probs;
                if (slot.sparams.temp <= 0 && n_probs > 0)
                {
                    // for llama_sample_token_greedy we need to sort candidates
                    llama_sample_softmax(ctx, &cur_p);
                }

                for (size_t i = 0; i < std::min(cur_p.size, (size_t)n_probs); ++i)
                {
                    result.probs.push_back({cur_p.data[i].id, cur_p.data[i].p});
                }

                if (!process_token(result, slot))
                {
                    slot.release();
                    slot.print_timings();
                    send_final_response(slot);
                    metrics.on_prediction(slot);
                }

                slot.i_batch = -1;
            }
        }

        LOG_VERBOSE("slots updated", {});
        return true;
    }

    json model_meta() {
        return json{
            {"vocab_type",  llama_vocab_type(model)},
            {"n_vocab",     llama_n_vocab(model)},
            {"n_ctx_train", llama_n_ctx_train(model)},
            {"n_embd",      llama_n_embd(model)},
            {"n_params",    llama_model_n_params(model)},
            {"size",        llama_model_size(model)},
        };
    }
};

static void server_print_usage(const char *argv0, const gpt_params &params,
                               const server_params &sparams)
{
    printf("usage: %s [options]\n", argv0);
    printf("\n");
    printf("options:\n");
    printf("  -h, --help                show this help message and exit\n");
    printf("  -v, --verbose             verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled");
    printf("  -t N, --threads N         number of threads to use during computation (default: %d)\n", params.n_threads);
    printf("  -tb N, --threads-batch N  number of threads to use during batch and prompt processing (default: same as --threads)\n");
    printf("  --threads-http N          number of threads in the http server pool to process requests (default: max(hardware concurrency - 1, --parallel N + 2))\n");
    printf("  -c N, --ctx-size N        size of the prompt context (default: %d)\n", params.n_ctx);
    printf("  --rope-scaling {none,linear,yarn}\n");
    printf("                            RoPE frequency scaling method, defaults to linear unless specified by the model\n");
    printf("  --rope-freq-base N        RoPE base frequency (default: loaded from model)\n");
    printf("  --rope-freq-scale N       RoPE frequency scaling factor, expands context by a factor of 1/N\n");
    printf("  --yarn-ext-factor N       YaRN: extrapolation mix factor (default: 1.0, 0.0 = full interpolation)\n");
    printf("  --yarn-attn-factor N      YaRN: scale sqrt(t) or attention magnitude (default: 1.0)\n");
    printf("  --yarn-beta-slow N        YaRN: high correction dim or alpha (default: %.1f)\n", params.yarn_beta_slow);
    printf("  --yarn-beta-fast N        YaRN: low correction dim or beta (default: %.1f)\n", params.yarn_beta_fast);
    printf("  --pooling {none,mean,cls}\n");
    printf("                            pooling type for embeddings, use model default if unspecified\n");
    printf("  -b N, --batch-size N      batch size for prompt processing (default: %d)\n", params.n_batch);
    printf("  --memory-f32              use f32 instead of f16 for memory key+value (default: disabled)\n");
    printf("                            not recommended: doubles context memory required and no measurable increase in quality\n");
    if (llama_supports_mlock())
    {
        printf("  --mlock                   force system to keep model in RAM rather than swapping or compressing\n");
    }
    if (llama_supports_mmap())
    {
        printf("  --no-mmap                 do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
    }
    printf("  --numa TYPE               attempt optimizations that help on some NUMA systems\n");
    printf("                              - distribute: spread execution evenly over all nodes\n");
    printf("                              - isolate: only spawn threads on CPUs on the node that execution started on\n");
    printf("                              - numactl: use the CPU map provided by numactl\n");
    if (llama_supports_gpu_offload()) {
        printf("  -ngl N, --n-gpu-layers N\n");
        printf("                            number of layers to store in VRAM\n");
        printf("  -sm SPLIT_MODE, --split-mode SPLIT_MODE\n");
        printf("                            how to split the model across multiple GPUs, one of:\n");
        printf("                              - none: use one GPU only\n");
        printf("                              - layer (default): split layers and KV across GPUs\n");
        printf("                              - row: split rows across GPUs\n");
        printf("  -ts SPLIT, --tensor-split SPLIT\n");
        printf("                            fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1\n");
        printf("  -mg i, --main-gpu i       the GPU to use for the model (with split-mode = none),\n");
        printf("                            or for intermediate results and KV (with split-mode = row)\n");
    }
    printf("  -m FNAME, --model FNAME\n");
    printf("                            model path (default: %s)\n", params.model.c_str());
    printf("  -a ALIAS, --alias ALIAS\n");
    printf("                            set an alias for the model, will be added as `model` field in completion response\n");
    printf("  --lora FNAME              apply LoRA adapter (implies --no-mmap)\n");
    printf("  --lora-base FNAME         optional model to use as a base for the layers modified by the LoRA adapter\n");
    printf("  --host                    ip address to listen (default: %s)\n", sparams.hostname.c_str());
    printf("  --port PORT               port to listen (default: %d)\n", sparams.port);
    printf("  --path PUBLIC_PATH        path from which to serve static files (default: %s)\n", sparams.public_path.c_str());
    printf("  --api-key API_KEY         optional api key to enhance server security. If set, requests must include this key for access.\n");
    printf("  --api-key-file FNAME      path to file containing api keys delimited by new lines. If set, requests must include one of the keys for access.\n");
    printf("  -to N, --timeout N        server read/write timeout in seconds (default: %d)\n", sparams.read_timeout);
    printf("  --embedding               enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
    printf("  -np N, --parallel N       number of slots for processing requests (default: %d)\n", params.n_parallel);
    printf("  -cb, --cont-batching      enable continuous batching (a.k.a dynamic batching) (default: disabled)\n");
    printf("  -fa, --flash-attn         enable Flash Attention (default: %s)\n", params.flash_attn ? "enabled" : "disabled");
    printf("  -spf FNAME, --system-prompt-file FNAME\n");
    printf("                            set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n");
    printf("  -ctk TYPE, --cache-type-k TYPE\n");
    printf("                            KV cache data type for K (default: f16)\n");
    printf("  -ctv TYPE, --cache-type-v TYPE\n");
    printf("                            KV cache data type for V (default: f16)\n");
    printf("  --mmproj MMPROJ_FILE      path to a multimodal projector file for LLaVA.\n");
    printf("  --log-format              log output format: json or text (default: json)\n");
    printf("  --log-disable             disables logging to a file.\n");
    printf("  --slots-endpoint-disable  disables slots monitoring endpoint.\n");
    printf("  --metrics                 enable prometheus compatible metrics endpoint (default: %s).\n", sparams.metrics_endpoint ? "enabled" : "disabled");
    printf("\n");
    printf("  -n, --n-predict           maximum tokens to predict (default: %d)\n", params.n_predict);
    printf("  --override-kv KEY=TYPE:VALUE\n");
    printf("                            advanced option to override model metadata by key. may be specified multiple times.\n");
    printf("                            types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
    printf("  -gan N, --grp-attn-n N    set the group attention factor to extend context size through self-extend (default: 1=disabled), used together with group attention width `--grp-attn-w`\n");
    printf("  -gaw N, --grp-attn-w N    set the group attention width to extend context size through self-extend (default: 512), used together with group attention factor `--grp-attn-n`\n");
    printf("  --chat-template JINJA_TEMPLATE\n");
    printf("                            set custom jinja chat template (default: template taken from model's metadata)\n");
    printf("                            Note: only commonly used templates are accepted, since we don't have jinja parser\n");
    printf("\n");
}

static void server_params_parse(int argc, char **argv, server_params &sparams, gpt_params &params)
{
    gpt_params    default_params;
    server_params default_sparams;

    std::string arg;
    bool invalid_param = false;

    for (int i = 1; i < argc; i++)
    {
        arg = argv[i];
        if (arg == "--port")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.port = std::stoi(argv[i]);
        }
        else if (arg == "--host")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.hostname = argv[i];
        }
        else if (arg == "--path")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.public_path = argv[i];
        }
        else if (arg == "--api-key")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.api_keys.emplace_back(argv[i]);
        }
        else if (arg == "--api-key-file")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            std::ifstream key_file(argv[i]);
            if (!key_file) {
                fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
                invalid_param = true;
                break;
            }
            std::string key;
            while (std::getline(key_file, key)) {
                if (key.size() > 0) {
                    sparams.api_keys.push_back(key);
                }
            }
            key_file.close();
        }
        else if (arg == "--timeout" || arg == "-to")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.read_timeout  = std::stoi(argv[i]);
            sparams.write_timeout = std::stoi(argv[i]);
        }
        else if (arg == "-m" || arg == "--model")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.model = argv[i];
        }
        else if (arg == "-a" || arg == "--alias")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.model_alias = argv[i];
        }
        else if (arg == "-h" || arg == "--help")
        {
            server_print_usage(argv[0], default_params, default_sparams);
            exit(0);
        }
        else if (arg == "-c" || arg == "--ctx-size" || arg == "--ctx_size")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_ctx = std::stoi(argv[i]);
        }
        else if (arg == "--rope-scaling")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            std::string value(argv[i]);
            /**/ if (value == "none")   { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
            else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
            else if (value == "yarn")   { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
            else { invalid_param = true; break; }
        }
        else if (arg == "--rope-freq-base")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.rope_freq_base = std::stof(argv[i]);
        }
        else if (arg == "--rope-freq-scale")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.rope_freq_scale = std::stof(argv[i]);
        }
        else if (arg == "--yarn-ext-factor")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.yarn_ext_factor = std::stof(argv[i]);
        }
        else if (arg == "--yarn-attn-factor")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.yarn_attn_factor = std::stof(argv[i]);
        }
        else if (arg == "--yarn-beta-fast")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.yarn_beta_fast = std::stof(argv[i]);
        }
        else if (arg == "--yarn-beta-slow")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.yarn_beta_slow = std::stof(argv[i]);
        }
        else if (arg == "--pooling")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            std::string value(argv[i]);
            /**/ if (value == "none") { params.pooling_type = LLAMA_POOLING_TYPE_NONE; }
            else if (value == "mean") { params.pooling_type = LLAMA_POOLING_TYPE_MEAN; }
            else if (value == "cls")  { params.pooling_type = LLAMA_POOLING_TYPE_CLS; }
            else { invalid_param = true; break; }
        }
        else if (arg == "--threads" || arg == "-t")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_threads = std::stoi(argv[i]);
        }
        else if (arg == "--grp-attn-n" || arg == "-gan")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.grp_attn_n = std::stoi(argv[i]);
        }
        else if (arg == "--grp-attn-w" || arg == "-gaw")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.grp_attn_w = std::stoi(argv[i]);
        }
        else if (arg == "--threads-batch" || arg == "-tb")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_threads_batch = std::stoi(argv[i]);
        }
        else if (arg == "--threads-http")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.n_threads_http = std::stoi(argv[i]);
        }
        else if (arg == "-b" || arg == "--batch-size")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_batch = std::stoi(argv[i]);
        }
        else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            if (llama_supports_gpu_offload()) {
                params.n_gpu_layers = std::stoi(argv[i]);
            } else {
                LOG_WARNING("Not compiled with GPU offload support, --n-gpu-layers option will be ignored. "
                            "See main README.md for information on enabling GPU BLAS support",
                            {{"n_gpu_layers", params.n_gpu_layers}});
            }
        }
        else if (arg == "--split-mode" || arg == "-sm")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            std::string arg_next = argv[i];
            if (arg_next == "none")
            {
                params.split_mode = LLAMA_SPLIT_MODE_NONE;
            }
            else if (arg_next == "layer")
            {
                params.split_mode = LLAMA_SPLIT_MODE_LAYER;
            }
            else if (arg_next == "row")
            {
                params.split_mode = LLAMA_SPLIT_MODE_ROW;
            }
            else {
                invalid_param = true;
                break;
            }
#ifndef GGML_USE_CUDA
            fprintf(stderr, "warning: llama.cpp was compiled without CUDA. Setting the split mode has no effect.\n");
#endif // GGML_USE_CUDA
        }
        else if (arg == "--tensor-split" || arg == "-ts")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
            std::string arg_next = argv[i];

            // split string by , and /
            const std::regex regex{R"([,/]+)"};
            std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
            std::vector<std::string> split_arg{it, {}};
            GGML_ASSERT(split_arg.size() <= llama_max_devices());

            for (size_t i_device = 0; i_device < llama_max_devices(); ++i_device)
            {
                if (i_device < split_arg.size())
                {
                    params.tensor_split[i_device] = std::stof(split_arg[i_device]);
                }
                else
                {
                    params.tensor_split[i_device] = 0.0f;
                }
            }
#else
            LOG_WARNING("llama.cpp was compiled without CUDA. It is not possible to set a tensor split.\n", {});
#endif // GGML_USE_CUDA
        }
        else if (arg == "--main-gpu" || arg == "-mg")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
            params.main_gpu = std::stoi(argv[i]);
#else
            LOG_WARNING("llama.cpp was compiled without CUDA. It is not possible to set a main GPU.", {});
#endif
        }
        else if (arg == "--lora")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_adapter.emplace_back(argv[i], 1.0f);
            params.use_mmap = false;
        }
        else if (arg == "--lora-scaled")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            const char * lora_adapter = argv[i];
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_adapter.emplace_back(lora_adapter, std::stof(argv[i]));
            params.use_mmap = false;
        }
        else if (arg == "-v" || arg == "--verbose")
        {
            server_verbose = true;
        }
        else if (arg == "--mlock")
        {
            params.use_mlock = true;
        }
        else if (arg == "--no-mmap")
        {
            params.use_mmap = false;
        }
        else if (arg == "--numa")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            } else {
                std::string value(argv[i]);
                /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
                else if (value == "isolate")                   { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
                else if (value == "numactl")                   { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
                else { invalid_param = true; break; }
            }
        }
        else if (arg == "--embedding")
        {
            params.embedding = true;
        }
        else if (arg == "-cb" || arg == "--cont-batching")
        {
            params.cont_batching = true;
        }
        else if (arg == "-fa" || arg == "--flash-attn")
        {
            params.flash_attn = true;
        }
        else if (arg == "-np" || arg == "--parallel")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_parallel = std::stoi(argv[i]);
        }
        else if (arg == "-n" || arg == "--n-predict")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_predict = std::stoi(argv[i]);
        }
        else if (arg == "-ctk" || arg == "--cache-type-k") {
            // guard against a missing value (the original indexed argv[++i] unchecked)
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.cache_type_k = argv[i];
        }
        else if (arg == "-ctv" || arg == "--cache-type-v") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.cache_type_v = argv[i];
        }
        else if (arg == "--mmproj")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.mmproj = argv[i];
        }
        else if (arg == "--log-format")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            if (std::strcmp(argv[i], "json") == 0)
            {
                server_log_json = true;
            }
            else if (std::strcmp(argv[i], "text") == 0)
            {
                server_log_json = false;
            }
            else
            {
                invalid_param = true;
                break;
            }
        }
        else if (arg == "--log-disable")
        {
            log_set_target(stdout);
            LOG_DEBUG("logging to file is disabled.", {});
        }
        else if (arg == "--slots-endpoint-disable")
        {
            sparams.slots_endpoint = false;
        }
        else if (arg == "--metrics")
        {
            sparams.metrics_endpoint = true;
        }
        else if (arg == "--chat-template")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            if (!verify_custom_template(argv[i])) {
                fprintf(stderr, "error: the supplied chat template is not supported: %s\n", argv[i]);
                fprintf(stderr, "note: llama.cpp does not use jinja parser, we only support commonly used templates\n");
                invalid_param = true;
                break;
            }
        }
        else if (arg == "--override-kv")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            char * sep = strchr(argv[i], '=');
            if (sep == nullptr || sep - argv[i] >= 128) {
                fprintf(stderr, "error: Malformed KV override: %s\n", argv[i]);
                invalid_param = true;
                break;
            }
            struct llama_model_kv_override kvo;
            std::strncpy(kvo.key, argv[i], sep - argv[i]);
            kvo.key[sep - argv[i]] = 0;
            sep++;
            if (strncmp(sep, "int:", 4) == 0) {
                sep += 4;
                kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
                kvo.val_i64 = std::atol(sep);
            } else if (strncmp(sep, "float:", 6) == 0) {
                sep += 6;
                kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
                kvo.val_f64 = std::atof(sep);
            } else if (strncmp(sep, "bool:", 5) == 0) {
                sep += 5;
                kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
                if (std::strcmp(sep, "true") == 0) {
                    kvo.val_bool = true;
                } else if (std::strcmp(sep, "false") == 0) {
                    kvo.val_bool = false;
                } else {
                    fprintf(stderr, "error: Invalid boolean value for KV override: %s\n", argv[i]);
                    invalid_param = true;
                    break;
                }
            } else {
                fprintf(stderr, "error: Invalid type for KV override: %s\n", argv[i]);
                invalid_param = true;
                break;
            }
            params.kv_overrides.push_back(kvo);
        }
        else
        {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            server_print_usage(argv[0], default_params, default_sparams);
            exit(1);
        }
    }

    if (!params.kv_overrides.empty()) {
        params.kv_overrides.emplace_back();
        params.kv_overrides.back().key[0] = 0;
    }

    if (invalid_param)
    {
        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
        server_print_usage(argv[0], default_params, default_sparams);
        exit(1);
    }
}

/* llama.cpp completion api semantics */
static json format_partial_response(
    llama_server_context &llama, server_slot *slot, const std::string &content, const std::vector<completion_token_output> &probs
) {
    json res = json
    {
        {"content",    content },
        {"stop",       false},
        {"slot_id",    slot->id },
        {"multimodal", llama.multimodal }
    };

    if (slot->sparams.n_probs > 0)
    {
        res["completion_probabilities"] = probs_vector_to_json(llama.ctx, probs);
    }

    return res;
}

static json format_tokenizer_response(const std::vector<llama_token> &tokens)
{
    return json {
        {"tokens", tokens}
    };
}

static json format_detokenized_response(std::string content)
{
    return json {
        {"content", content}
    };
}

static void log_server_request(const httplib::Request &req, const httplib::Response &res)
{
    // skip GH copilot requests when using default port
    if (req.path == "/health" || req.path == "/v1/health" || req.path == "/v1/completions")
    {
        return;
    }

    LOG_DEBUG("request", {
        {"remote_addr", req.remote_addr},
        {"remote_port", req.remote_port},
        {"status",      res.status},
        {"method",      req.method},
        {"path",        req.path},
        {"params",      req.params},
    });

    LOG_VERBOSE("request", {
        {"request",  req.body},
        {"response", res.body},
    });
}

static void append_to_generated_text_from_generated_token_probs(llama_server_context &llama, server_slot *slot)
{
    auto & gtps = slot->generated_token_probs;
    auto translator = token_translator{llama.ctx};
    auto add_strlen = [=](size_t sum, const completion_token_output & cto) { return sum + translator(cto).size(); };
    const size_t len = std::accumulate(gtps.begin(), gtps.end(), size_t(0), add_strlen);
    if (slot->generated_text.capacity() < slot->generated_text.size() + len)
    {
        slot->generated_text.reserve(slot->generated_text.size() + len);
    }
    for (const completion_token_output & cto : gtps)
    {
        slot->generated_text += translator(cto);
    }
}

std::function<void(int)> shutdown_handler;
std::atomic_flag is_terminating = ATOMIC_FLAG_INIT;

inline void signal_handler(int signal) {
    if (is_terminating.test_and_set()) {
        // in case it hangs, we can force terminate the server by hitting Ctrl+C twice
        // this is for better developer experience, we can remove when the server is stable enough
        fprintf(stderr, "Received second interrupt, terminating immediately.\n");
        exit(1);
    }

    shutdown_handler(signal);
}

static bool update_load_progress(float progress, void *data)
{
    ((llama_server_context*)data)->modelProgress = progress;
    return true;
}

#if defined(_WIN32)
char* wchar_to_char(const wchar_t* wstr) {
    if (wstr == nullptr) return nullptr;

    // Determine the number of bytes needed for the UTF-8 string
    int bytes = WideCharToMultiByte(CP_UTF8, 0, wstr, -1, nullptr, 0, nullptr, nullptr);
    char* str = new char[bytes];

    // Convert the wide-character string to a UTF-8 string
    WideCharToMultiByte(CP_UTF8, 0, wstr, -1, str, bytes, nullptr, nullptr);

    return str;
}

int wmain(int argc, wchar_t **wargv) {
    char** argv = new char*[argc];

    for (int i = 0; i < argc; ++i) {
        argv[i] = wchar_to_char(wargv[i]);
    }

    // Adjust error mode to avoid error dialog after we start.
    SetErrorMode(SEM_FAILCRITICALERRORS);
#else
int main(int argc, char **argv) {
#endif

#if SERVER_VERBOSE != 1
    log_disable();
#endif

    // own arguments required by this example
    gpt_params    params;
    server_params sparams;

    // struct that contains llama context and inference
    llama_server_context llama;

    server_params_parse(argc, argv, sparams, params);

    if (params.model_alias == "unknown")
    {
        params.model_alias = params.model;
    }

    llama_backend_init();
    llama_numa_init(params.numa);

    LOG_INFO("build info", {{"build", LLAMA_BUILD_NUMBER},
                            {"commit", LLAMA_COMMIT}});

    LOG_INFO("system info", {
        {"n_threads",       params.n_threads},
        {"n_threads_batch", params.n_threads_batch},
        {"total_threads",   std::thread::hardware_concurrency()},
        {"system_info",     llama_print_system_info()},
    });

    httplib::Server svr;

    std::atomic<server_state> state{SERVER_STATE_LOADING_MODEL};

    svr.set_default_headers({{"Server", "llama.cpp"}});

    // CORS preflight
    svr.Options(R"(.*)", [](const httplib::Request &req, httplib::Response &res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        res.set_header("Access-Control-Allow-Credentials", "true");
        res.set_header("Access-Control-Allow-Methods", "POST");
        res.set_header("Access-Control-Allow-Headers", "*");
    });
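
    // GET /health reports server state as JSON, e.g. (values illustrative):
    //   200 {"status": "ok", "slots_idle": 4, "slots_processing": 0}
    //   503 {"status": "loading model", "progress": 0.42}
    //   500 {"status": "error", "error": "Model failed to load"}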

    svr.Get("/health", [&](const httplib::Request& req, httplib::Response& res) {
        server_state current_state = state.load();
        switch(current_state) {
            case SERVER_STATE_READY: {
                // request slots data using task queue
                task_server task;
                task.id = llama.queue_tasks.get_new_id();
                task.type = TASK_TYPE_METRICS;
                task.target_id = -1;

                llama.queue_results.add_waiting_task_id(task.id);
                llama.queue_tasks.post(task);

                // get the result
                task_result result = llama.queue_results.recv(task.id);
                llama.queue_results.remove_waiting_task_id(task.id);

                int n_idle_slots       = result.result_json["idle"];
                int n_processing_slots = result.result_json["processing"];

                json health = {
                    {"status",           "ok"},
                    {"slots_idle",       n_idle_slots},
                    {"slots_processing", n_processing_slots}};
                res.status = 200; // HTTP OK
                if (sparams.slots_endpoint && req.has_param("include_slots")) {
                    health["slots"] = result.result_json["slots"];
                }

                if (n_idle_slots == 0) {
                    health["status"] = "no slot available";
                    if (req.has_param("fail_on_no_slot")) {
                        res.status = 503; // HTTP Service Unavailable
                    }
                }
                res.set_content(health.dump(), "application/json");
                break;
            }
            case SERVER_STATE_LOADING_MODEL:
                char buf[128];
                snprintf(&buf[0], 128, R"({"status": "loading model", "progress": %0.2f})", llama.modelProgress);
                res.set_content(buf, "application/json");
                res.status = 503; // HTTP Service Unavailable
                break;
            case SERVER_STATE_ERROR:
                res.set_content(R"({"status": "error", "error": "Model failed to load"})", "application/json");
                res.status = 500; // HTTP Internal Server Error
                break;
        }
    });

    if (sparams.slots_endpoint) {
        svr.Get("/slots", [&](const httplib::Request&, httplib::Response& res) {
            // request slots data using task queue
            task_server task;
            task.id = llama.queue_tasks.get_new_id();
            task.type = TASK_TYPE_METRICS;
            task.target_id = -1;

            llama.queue_results.add_waiting_task_id(task.id);
            llama.queue_tasks.post(task);

            // get the result
            task_result result = llama.queue_results.recv(task.id);
            llama.queue_results.remove_waiting_task_id(task.id);

            res.set_content(result.result_json["slots"].dump(), "application/json");
            res.status = 200; // HTTP OK
        });
    }

    if (sparams.metrics_endpoint) {
        svr.Get("/metrics", [&](const httplib::Request&, httplib::Response& res) {
            // request slots data using task queue
            task_server task;
            task.id = llama.queue_tasks.get_new_id();
            task.type = TASK_TYPE_METRICS;
            task.target_id = -1;

            llama.queue_results.add_waiting_task_id(task.id);
            llama.queue_tasks.post(task);

            // get the result
            task_result result = llama.queue_results.recv(task.id);
            llama.queue_results.remove_waiting_task_id(task.id);

            json data = result.result_json;

            uint64_t n_prompt_tokens_processed = data["n_prompt_tokens_processed"];
            uint64_t t_prompt_processing       = data["t_prompt_processing"];

            uint64_t n_tokens_predicted  = data["n_tokens_predicted"];
            uint64_t t_tokens_generation = data["t_tokens_generation"];

            int32_t kv_cache_used_cells = data["kv_cache_used_cells"];

            // metrics definition: https://prometheus.io/docs/practices/naming/#metric-names
            json all_metrics_def = json {
                {"counter", {{
                        {"name",  "prompt_tokens_total"},
                        {"help",  "Number of prompt tokens processed."},
                        {"value", data["n_prompt_tokens_processed_total"]}
                }, {
                        {"name",  "tokens_predicted_total"},
                        {"help",  "Number of generation tokens processed."},
                        {"value", data["n_tokens_predicted_total"]}
                }}},
                {"gauge", {{
                        {"name",  "prompt_tokens_seconds"},
                        {"help",  "Average prompt throughput in tokens/s."},
                        {"value", n_prompt_tokens_processed ? 1e3 / t_prompt_processing * n_prompt_tokens_processed : 0}
                },{
                        {"name",  "predicted_tokens_seconds"},
                        {"help",  "Average generation throughput in tokens/s."},
                        {"value", n_tokens_predicted ? 1e3 / t_tokens_generation * n_tokens_predicted : 0}
                },{
                        {"name",  "kv_cache_usage_ratio"},
                        {"help",  "KV-cache usage. 1 means 100 percent usage."},
                        {"value", 1. * kv_cache_used_cells / params.n_ctx}
                },{
                        {"name",  "kv_cache_tokens"},
                        {"help",  "KV-cache tokens."},
                        {"value", data["kv_cache_tokens_count"]}
                },{
                        {"name",  "requests_processing"},
                        {"help",  "Number of requests processing."},
                        {"value", data["processing"]}
                },{
                        {"name",  "requests_deferred"},
                        {"help",  "Number of requests deferred."},
                        {"value", data["deferred"]}
                }}}
            };
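
            // rendered below in the Prometheus text exposition format, e.g.:
            //   # HELP llamacpp:prompt_tokens_total Number of prompt tokens processed.
            //   # TYPE llamacpp:prompt_tokens_total counter
            //   llamacpp:prompt_tokens_total 1024
            // (metric values above are illustrative)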

            std::stringstream prometheus;
            for (const auto& el : all_metrics_def.items()) {
                const auto& type        = el.key();
                const auto& metrics_def = el.value();
                for (const auto& metric_def : metrics_def) {
                    std::string name = metric_def["name"];
                    std::string help = metric_def["help"];
                    auto value = json_value(metric_def, "value", 0);
                    prometheus << "# HELP llamacpp:" << name << " " << help << "\n"
                               << "# TYPE llamacpp:" << name << " " << type << "\n"
                               << "llamacpp:" << name << " " << value << "\n";
                }
            }

            res.set_content(prometheus.str(), "text/plain; version=0.0.4");
            res.status = 200; // HTTP OK
        });
    }

    svr.set_logger(log_server_request);

    svr.set_exception_handler([](const httplib::Request &, httplib::Response &res, std::exception_ptr ep)
            {
                const char fmt[] = "500 Internal Server Error\n%s";
                char buf[BUFSIZ];
                try
                {
                    std::rethrow_exception(std::move(ep));
                }
                catch (std::exception &e)
                {
                    snprintf(buf, sizeof(buf), fmt, e.what());
                }
                catch (...)
                {
                    snprintf(buf, sizeof(buf), fmt, "Unknown Exception");
                }
                res.set_content(buf, "text/plain; charset=utf-8");
                res.status = 500;
            });

    svr.set_error_handler([](const httplib::Request &, httplib::Response &res)
            {
                if (res.status == 401)
                {
                    res.set_content("Unauthorized", "text/plain; charset=utf-8");
                }
                if (res.status == 400)
                {
                    res.set_content("Invalid request", "text/plain; charset=utf-8");
                }
                else if (res.status == 404)
                {
                    res.set_content("File Not Found", "text/plain; charset=utf-8");
                    res.status = 404;
                }
            });

    // set timeouts and change hostname and port
    svr.set_read_timeout (sparams.read_timeout);
    svr.set_write_timeout(sparams.write_timeout);

    if (!svr.bind_to_port(sparams.hostname, sparams.port))
    {
        fprintf(stderr, "\ncouldn't bind to server socket: hostname=%s port=%d\n\n", sparams.hostname.c_str(), sparams.port);
        return 1;
    }

    // Set the base directory for serving static files
    svr.set_base_dir(sparams.public_path);

    std::unordered_map<std::string, std::string> log_data;
    log_data["hostname"] = sparams.hostname;
    log_data["port"]     = std::to_string(sparams.port);

    if (sparams.api_keys.size() == 1) {
        log_data["api_key"] = "api_key: ****" + sparams.api_keys[0].substr(sparams.api_keys[0].length() - 4);
    } else if (sparams.api_keys.size() > 1) {
        log_data["api_key"] = "api_key: " + std::to_string(sparams.api_keys.size()) + " keys loaded";
    }

    if (sparams.n_threads_http < 1) {
        // +2 threads for monitoring endpoints
        sparams.n_threads_http = std::max(params.n_parallel + 2, (int32_t) std::thread::hardware_concurrency() - 1);
    }
    log_data["n_threads_http"] = std::to_string(sparams.n_threads_http);
    svr.new_task_queue = [&sparams] { return new httplib::ThreadPool(sparams.n_threads_http); };

    LOG_INFO("HTTP server listening", log_data);
    // run the HTTP server in a thread - see comment below
    std::thread t([&]()
            {
                if (!svr.listen_after_bind())
                {
                    state.store(SERVER_STATE_ERROR);
                    return 1;
                }

                return 0;
            });

    // load the model
    params.progress_callback = update_load_progress;
    params.progress_callback_user_data = (void*)&llama;

    if (!llama.load_model(params))
    {
        state.store(SERVER_STATE_ERROR);
        return 1;
    } else {
        llama.initialize();
        state.store(SERVER_STATE_READY);
        LOG_INFO("model loaded", {});
    }
    const auto model_meta = llama.model_meta();

    // Middleware for API key validation
    auto validate_api_key = [&sparams](const httplib::Request &req, httplib::Response &res) -> bool {
        // If API key is not set, skip validation
        if (sparams.api_keys.empty()) {
            return true;
        }

        // Check for API key in the header
        auto auth_header = req.get_header_value("Authorization");

        std::string prefix = "Bearer ";
        if (auth_header.substr(0, prefix.size()) == prefix) {
            std::string received_api_key = auth_header.substr(prefix.size());
            if (std::find(sparams.api_keys.begin(), sparams.api_keys.end(), received_api_key) != sparams.api_keys.end()) {
                return true; // API key is valid
            }
        }

        // API key is invalid or not provided
        res.set_content("Unauthorized: Invalid API Key", "text/plain; charset=utf-8");
        res.status = 401; // Unauthorized

        LOG_WARNING("Unauthorized: Invalid API Key", {});

        return false;
    };
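
    // clients pass the key as a standard bearer token, e.g. (hypothetical key and port):
    //   curl -H "Authorization: Bearer my-secret-key" http://localhost:8080/completion \
    //        -d '{"prompt": "Hello"}'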
  2705. // this is only called if no index.html is found in the public --path
  2706. svr.Get("/", [](const httplib::Request &, httplib::Response &res)
  2707. {
  2708. res.set_content("server running", "text/plain; charset=utf-8");
  2709. res.status = 200; // Unauthorized
  2710. return true;
  2711. });
    svr.Post("/completion", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res)
    {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        if (!validate_api_key(req, res)) {
            return;
        }
        json data = json::parse(req.body);
        const int task_id = llama.queue_tasks.get_new_id();
        llama.queue_results.add_waiting_task_id(task_id);
        llama.request_completion(task_id, data, false, -1);
        if (!json_value(data, "stream", false)) {
            task_result result = llama.queue_results.recv(task_id);
            if (!result.error && result.stop) {
                res.set_content(result.result_json.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
            }
            else
            {
                res.status = 404;
                res.set_content(result.result_json["content"], "text/plain; charset=utf-8");
            }
            llama.queue_results.remove_waiting_task_id(task_id);
        } else {
            const auto chunked_content_provider = [task_id, &llama](size_t, httplib::DataSink & sink)
            {
                while (true)
                {
                    task_result result = llama.queue_results.recv(task_id);
                    if (!result.error) {
                        const std::string str =
                            "data: " +
                            result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
                            "\n\n";
                        LOG_VERBOSE("data stream", {
                            { "to_send", str }
                        });
                        if (!sink.write(str.c_str(), str.size()))
                        {
                            llama.queue_results.remove_waiting_task_id(task_id);
                            return false;
                        }
                        if (result.stop) {
                            break;
                        }
                    } else {
                        const std::string str =
                            "error: " +
                            result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
                            "\n\n";
                        LOG_VERBOSE("data stream", {
                            { "to_send", str }
                        });
                        if (!sink.write(str.c_str(), str.size()))
                        {
                            llama.queue_results.remove_waiting_task_id(task_id);
                            return false;
                        }
                        break;
                    }
                }

                llama.queue_results.remove_waiting_task_id(task_id);
                sink.done();
                return true;
            };

            auto on_complete = [task_id, &llama] (bool)
            {
                // cancel
                llama.request_cancel(task_id);
                llama.queue_results.remove_waiting_task_id(task_id);
            };

            res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
        }
    });
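
    // Illustrative /tokenize request (hypothetical host/port):
    //   curl -X POST http://localhost:8080/tokenize -d '{"content": "Hello world"}'
    // A missing "content" field yields an empty token list.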
    svr.Post("/tokenize", [&llama](const httplib::Request &req, httplib::Response &res)
    {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        const json body = json::parse(req.body);

        std::vector<llama_token> tokens;
        if (body.count("content") != 0)
        {
            tokens = llama.tokenize(body["content"], false);
        }
        const json data = format_tokenizer_response(tokens);
        return res.set_content(data.dump(), "application/json; charset=utf-8");
    });
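
    // Illustrative /detokenize request (hypothetical host/port):
    //   curl -X POST http://localhost:8080/detokenize -d '{"tokens": [1, 2, 3]}'
    // A missing "tokens" field yields an empty string.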
    svr.Post("/detokenize", [&llama](const httplib::Request &req, httplib::Response &res)
    {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        const json body = json::parse(req.body);

        std::string content;
        if (body.count("tokens") != 0)
        {
            const std::vector<llama_token> tokens = body["tokens"];
            content = tokens_to_str(llama.ctx, tokens.cbegin(), tokens.cend());
        }
        const json data = format_detokenized_response(content);
        return res.set_content(data.dump(), "application/json; charset=utf-8");
    });
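
    // Illustrative /embedding request (hypothetical host/port):
    //   curl -X POST http://localhost:8080/embedding -d '{"content": "some text"}'
    // The response has the shape {"embedding": [ ... ], "prompt_n": <tokens evaluated>}.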
    svr.Post("/embedding", [&llama](const httplib::Request &req, httplib::Response &res)
    {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        const json body = json::parse(req.body);

        json prompt;
        if (body.count("content") != 0)
        {
            prompt = body["content"];
        }
        else
        {
            prompt = "";
        }

        // unwrap a single-element array into a plain prompt
        // (guard with is_array(): json strings also report size() == 1, and indexing
        //  a string with operator[] throws)
        if (prompt.is_array() && prompt.size() == 1) {
            prompt = prompt[0];
        }

        // create and queue the task
        json responses;
        {
            const int id_task = llama.queue_tasks.get_new_id();
            llama.queue_results.add_waiting_task_id(id_task);
            llama.request_completion(id_task, {{"prompt", prompt}}, true, -1);

            // get the result
            task_result result = llama.queue_results.recv(id_task);
            llama.queue_results.remove_waiting_task_id(id_task);
            if (result.error) {
                return res.set_content(result.result_json.dump(), "application/json; charset=utf-8");
            }

            responses = result.result_json.value("results", std::vector<json>{result.result_json});

            json embeddings = json::array();
            int prompt_n = 0;
            for (auto & elem : responses) {
                embeddings.push_back(elem.at("embedding"));
                prompt_n += elem.at("timings").at("prompt_n").get<int>();
            }

            // send the result
            json embedding_res = json{{"embedding", embeddings}, {"prompt_n", prompt_n}};
            return res.set_content(embedding_res.dump(), "application/json; charset=utf-8");
        }
    });
    // GG: if I put the main loop inside a thread, it crashes on the first request when built in Debug!?
    //     "Bus error: 10" - this is on macOS, it does not crash on Linux
    //std::thread t2([&]()
    /*{
        bool running = true;
        while (running)
        {
            running = llama.update_slots();
        }
    }*/
    //);
    llama.queue_tasks.on_new_task(std::bind(
        &llama_server_context::process_single_task, &llama, std::placeholders::_1));
    llama.queue_tasks.on_finish_multitask(std::bind(
        &llama_server_context::on_finish_multitask, &llama, std::placeholders::_1));
    llama.queue_tasks.on_run_slots(std::bind(
        &llama_server_context::update_slots, &llama));
    llama.queue_results.on_multitask_update(std::bind(
        &llama_server_queue::update_multitask,
        &llama.queue_tasks,
        std::placeholders::_1,
        std::placeholders::_2,
        std::placeholders::_3
    ));
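
    // The bindings above wire the task queue to the server context: new tasks go to
    // process_single_task, idle time runs update_slots, and multitask updates are
    // routed back into queue_tasks.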
    shutdown_handler = [&](int) {
        llama.queue_tasks.terminate();
    };
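
    // terminate() unblocks start_loop() below, letting the main thread stop the HTTP
    // server, join the listener thread, and free the backend in order.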
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
    struct sigaction sigint_action;
    sigint_action.sa_handler = signal_handler;
    sigemptyset (&sigint_action.sa_mask);
    sigint_action.sa_flags = 0;
    sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
    auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
        return (ctrl_type == CTRL_C_EVENT) ? (signal_handler(SIGINT), true) : false;
    };
    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);

    // free the duplicated argv strings allocated during startup (no longer needed)
    for (int i = 0; i < argc; ++i) {
        delete[] argv[i];
    }
    delete[] argv;
#endif
    llama.queue_tasks.start_loop();
    svr.stop();
    t.join();

    llama_backend_free();

    return 0;
}