server.cpp

  1. // MIT License
  2. // Copyright (c) 2023 Georgi Gerganov
  3. // Permission is hereby granted, free of charge, to any person obtaining a copy
  4. // of this software and associated documentation files (the "Software"), to deal
  5. // in the Software without restriction, including without limitation the rights
  6. // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  7. // copies of the Software, and to permit persons to whom the Software is
  8. // furnished to do so, subject to the following conditions:
  9. // The above copyright notice and this permission notice shall be included in all
  10. // copies or substantial portions of the Software.
  11. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  12. // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13. // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  14. // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  15. // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  16. // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  17. // SOFTWARE.
  18. #include "common.h"
  19. #include "llama.h"
  20. #include "grammar-parser.h"
  21. #include "utils.hpp"
  22. #include "../llava/clip.h"
  23. #include "../llava/llava.h"
  24. #include "stb_image.h"
  25. #ifndef NDEBUG
  26. // in debug builds, let exceptions crash the server; in release builds they are reported as an HTTP 500 error
  27. #define CPPHTTPLIB_NO_EXCEPTIONS 1
  28. #endif
  29. // increase max payload length to allow use of larger context size
  30. #define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
  31. #include "httplib.h"
  32. #include "json.hpp"
  33. #if defined(_WIN32)
  34. #include <windows.h>
  35. #endif
  36. #include <cstddef>
  37. #include <thread>
  38. #include <chrono>
  39. #include <condition_variable>
  40. #include <atomic>
  41. #include <signal.h>
  42. using json = nlohmann::json;
  43. struct server_params {
  44. std::string hostname = "127.0.0.1";
  45. std::vector<std::string> api_keys;
  46. std::string public_path = "examples/server/public";
  47. std::string chat_template = "";
  48. int32_t port = 8080;
  49. int32_t read_timeout = 600;
  50. int32_t write_timeout = 600;
  51. bool slots_endpoint = true;
  52. bool metrics_endpoint = false;
  53. int n_threads_http = -1;
  54. };
  55. bool server_verbose = false;
  56. bool server_log_json = false;
  57. enum stop_type {
  58. STOP_FULL,
  59. STOP_PARTIAL,
  60. };
  61. // TODO: can become bool if we can't find use of more states
  62. enum slot_state {
  63. IDLE,
  64. PROCESSING,
  65. };
  66. enum slot_command {
  67. NONE,
  68. LOAD_PROMPT,
  69. RELEASE,
  70. };
  71. struct slot_params {
  72. bool stream = true;
  73. bool cache_prompt = false; // remember the prompt to avoid reprocessing it from scratch
  74. uint32_t seed = -1; // RNG seed
  75. int32_t n_keep = 0; // number of tokens to keep from initial prompt
  76. int32_t n_predict = -1; // new tokens to predict
  77. std::vector<std::string> antiprompt;
  78. json input_prefix;
  79. json input_suffix;
  80. };
  81. struct slot_image {
  82. int32_t id;
  83. bool request_encode_image = false;
  84. float * image_embedding = nullptr;
  85. int32_t image_tokens = 0;
  86. clip_image_u8 * img_data;
  87. std::string prefix_prompt; // prompt text that precedes this image
  88. };
  89. struct server_slot {
  90. int id;
  91. int task_id = -1;
  92. struct slot_params params;
  93. slot_state state = IDLE;
  94. slot_command command = NONE;
  95. // used to determine which slot was least recently used
  96. int64_t t_last_used = -1;
  97. // generation props
  98. int32_t n_ctx = 0; // context size per slot
  99. int32_t n_past = 0;
  100. int32_t n_decoded = 0;
  101. int32_t n_remaining = -1;
  102. int32_t i_batch = -1;
  103. int32_t n_predict = -1;
  104. int32_t n_prompt_tokens = 0;
  105. int32_t n_prompt_tokens_processed = 0;
  106. json prompt;
  107. std::string generated_text;
  108. llama_token sampled;
  109. std::vector<llama_token> cache_tokens;
  110. std::vector<completion_token_output> generated_token_probs;
  111. bool infill = false;
  112. bool embedding = false;
  113. bool has_next_token = true;
  114. bool truncated = false;
  115. bool stopped_eos = false;
  116. bool stopped_word = false;
  117. bool stopped_limit = false;
  118. std::string stopping_word;
  119. // sampling
  120. struct llama_sampling_params sparams;
  121. llama_sampling_context *ctx_sampling = nullptr;
  122. int32_t ga_i = 0; // group-attention state
  123. int32_t ga_n = 1; // group-attention factor
  124. int32_t ga_w = 512; // group-attention width
  125. int32_t n_past_se = 0; // self-extend
  126. // multimodal
  127. std::vector<slot_image> images;
  128. // stats
  129. size_t n_sent_text = 0; // number of text characters sent to the client
  130. size_t n_sent_token_probs = 0;
  131. int64_t t_start_process_prompt;
  132. int64_t t_start_genereration;
  133. double t_prompt_processing; // ms
  134. double t_token_generation; // ms
  135. // multitasks
  136. int multitask_id = -1;
  137. void reset() {
  138. n_prompt_tokens = 0;
  139. generated_text = "";
  140. truncated = false;
  141. stopped_eos = false;
  142. stopped_word = false;
  143. stopped_limit = false;
  144. stopping_word = "";
  145. n_past = 0;
  146. n_sent_text = 0;
  147. n_sent_token_probs = 0;
  148. infill = false;
  149. ga_i = 0;
  150. n_past_se = 0;
  151. generated_token_probs.clear();
  152. for (slot_image & img : images) {
  153. free(img.image_embedding);
  154. if (img.img_data) {
  155. clip_image_u8_free(img.img_data);
  156. }
  157. img.prefix_prompt = "";
  158. }
  159. images.clear();
  160. }
  161. bool has_budget(gpt_params &global_params) {
  162. if (params.n_predict == -1 && global_params.n_predict == -1) {
  163. return true; // limitless
  164. }
  165. n_remaining = -1;
  166. if (params.n_predict != -1) {
  167. n_remaining = params.n_predict - n_decoded;
  168. } else if (global_params.n_predict != -1) {
  169. n_remaining = global_params.n_predict - n_decoded;
  170. }
  171. return n_remaining > 0; // true while there is still budget left
  172. }
  173. bool available() const {
  174. return state == IDLE && command == NONE;
  175. }
  176. bool is_processing() const {
  177. return (state == IDLE && command == LOAD_PROMPT) || state == PROCESSING;
  178. }
  179. void add_token_string(const completion_token_output &token) {
  180. if (command == RELEASE) {
  181. return;
  182. }
  183. cache_tokens.push_back(token.tok);
  184. generated_token_probs.push_back(token);
  185. }
  186. void release() {
  187. if (state == PROCESSING)
  188. {
  189. t_token_generation = (ggml_time_us() - t_start_genereration) / 1e3;
  190. command = RELEASE;
  191. }
  192. }
  193. json get_formated_timings() {
  194. return json
  195. {
  196. {"prompt_n", n_prompt_tokens_processed},
  197. {"prompt_ms", t_prompt_processing},
  198. {"prompt_per_token_ms", t_prompt_processing / n_prompt_tokens_processed},
  199. {"prompt_per_second", 1e3 / t_prompt_processing * n_prompt_tokens_processed},
  200. {"predicted_n", n_decoded},
  201. {"predicted_ms", t_token_generation},
  202. {"predicted_per_token_ms", t_token_generation / n_decoded},
  203. {"predicted_per_second", 1e3 / t_token_generation * n_decoded},
  204. };
  205. }
  206. void print_timings() const {
  207. char buffer[512];
  208. double t_token = t_prompt_processing / n_prompt_tokens_processed;
  209. double n_tokens_second = 1e3 / t_prompt_processing * n_prompt_tokens_processed;
  210. sprintf(buffer, "prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)",
  211. t_prompt_processing, n_prompt_tokens_processed,
  212. t_token, n_tokens_second);
  213. LOG_DEBUG(buffer, {
  214. {"slot_id", id},
  215. {"task_id", task_id},
  216. {"t_prompt_processing", t_prompt_processing},
  217. {"n_prompt_tokens_processed", n_prompt_tokens_processed},
  218. {"t_token", t_token},
  219. {"n_tokens_second", n_tokens_second},
  220. });
  221. t_token = t_token_generation / n_decoded;
  222. n_tokens_second = 1e3 / t_token_generation * n_decoded;
  223. sprintf(buffer, "generation eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)",
  224. t_token_generation, n_decoded,
  225. t_token, n_tokens_second);
  226. LOG_DEBUG(buffer, {
  227. {"slot_id", id},
  228. {"task_id", task_id},
  229. {"t_token_generation", t_token_generation},
  230. {"n_decoded", n_decoded},
  231. {"t_token", t_token},
  232. {"n_tokens_second", n_tokens_second},
  233. });
  234. sprintf(buffer, " total time = %10.2f ms", t_prompt_processing + t_token_generation);
  235. LOG_DEBUG(buffer, {
  236. {"slot_id", id},
  237. {"task_id", task_id},
  238. {"t_prompt_processing", t_prompt_processing},
  239. {"t_token_generation", t_token_generation},
  240. {"t_total", t_prompt_processing + t_token_generation},
  241. });
  242. }
  243. };
  244. struct server_metrics {
  245. uint64_t n_prompt_tokens_processed_total = 0;
  246. uint64_t n_tokens_predicted_total = 0;
  247. uint64_t n_prompt_tokens_processed = 0;
  248. uint64_t t_prompt_processing = 0;
  249. uint64_t n_tokens_predicted = 0;
  250. uint64_t t_tokens_generation = 0;
  251. void on_prompt_eval(const server_slot &slot) {
  252. n_prompt_tokens_processed_total += slot.n_prompt_tokens_processed;
  253. n_prompt_tokens_processed += slot.n_prompt_tokens_processed;
  254. t_prompt_processing += slot.t_prompt_processing;
  255. }
  256. void on_prediction(const server_slot &slot) {
  257. n_tokens_predicted_total += slot.n_decoded;
  258. n_tokens_predicted += slot.n_decoded;
  259. t_tokens_generation += slot.t_token_generation;
  260. }
  261. void reset_bucket() {
  262. n_prompt_tokens_processed = 0;
  263. t_prompt_processing = 0;
  264. n_tokens_predicted = 0;
  265. t_tokens_generation = 0;
  266. }
  267. };
  268. struct llama_server_context
  269. {
  270. llama_model *model = nullptr;
  271. float modelProgress = 0.0;
  272. llama_context *ctx = nullptr;
  273. clip_ctx *clp_ctx = nullptr;
  274. gpt_params params;
  275. llama_batch batch;
  276. bool multimodal = false;
  277. bool clean_kv_cache = true;
  278. bool all_slots_are_idle = false;
  279. bool add_bos_token = true;
  280. int32_t n_ctx; // total context for all clients / slots
  281. // system prompt
  282. bool system_need_update = false;
  283. std::string system_prompt;
  284. std::vector<llama_token> system_tokens;
  285. std::string name_user; // this should be the antiprompt
  286. std::string name_assistant;
  287. // slots / clients
  288. std::vector<server_slot> slots;
  289. json default_generation_settings_for_props;
  290. llama_server_queue queue_tasks;
  291. llama_server_response queue_results;
  292. server_metrics metrics;
  293. ~llama_server_context()
  294. {
  295. if (clp_ctx)
  296. {
  297. LOG_DEBUG("freeing clip model", {});
  298. clip_free(clp_ctx);
  299. clp_ctx = nullptr;
  300. }
  301. if (ctx)
  302. {
  303. llama_free(ctx);
  304. ctx = nullptr;
  305. }
  306. if (model)
  307. {
  308. llama_free_model(model);
  309. model = nullptr;
  310. }
  311. }
  312. bool load_model(const gpt_params &params_)
  313. {
  314. params = params_;
  315. if (!params.mmproj.empty()) {
  316. multimodal = true;
  317. LOG_DEBUG("Multi Modal Mode Enabled", {});
  318. clp_ctx = clip_model_load(params.mmproj.c_str(), /*verbosity=*/ 1);
  319. if(clp_ctx == nullptr) {
  320. LOG_ERROR("unable to load clip model", {{"model", params.mmproj}});
  321. return false;
  322. }
  323. if (params.n_ctx < 2048) { // request larger context for the image embedding
  324. params.n_ctx = 2048;
  325. }
  326. }
  327. std::tie(model, ctx) = llama_init_from_gpt_params(params);
  328. if (model == nullptr)
  329. {
  330. LOG_ERROR("unable to load model", {{"model", params.model}});
  331. return false;
  332. }
  333. if (multimodal) {
  334. const int n_embd_clip = clip_n_mmproj_embd(clp_ctx);
  335. const int n_embd_llm = llama_n_embd(model);
  336. if (n_embd_clip != n_embd_llm) {
  337. LOG_TEE("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_embd_clip, n_embd_llm);
  338. llama_free(ctx);
  339. llama_free_model(model);
  340. return false;
  341. }
  342. }
  343. n_ctx = llama_n_ctx(ctx);
  344. add_bos_token = llama_should_add_bos_token(model);
  345. return true;
  346. }
  347. void validate_model_chat_template(server_params & sparams) {
  348. llama_chat_message chat[] = {{"user", "test"}};
  349. std::vector<char> buf(1);
  350. int res = llama_chat_apply_template(model, nullptr, chat, 1, true, buf.data(), buf.size());
  351. if (res < 0) {
  352. LOG_ERROR("The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses", {});
  353. sparams.chat_template = "chatml";
  354. }
  355. }
  356. void initialize() {
  357. // create slots
  358. all_slots_are_idle = true;
  359. const int32_t n_ctx_slot = n_ctx / params.n_parallel;
  360. LOG_DEBUG("initializing slots", {{"n_slots", params.n_parallel}});
  361. for (int i = 0; i < params.n_parallel; i++)
  362. {
  363. server_slot slot;
  364. slot.id = i;
  365. slot.n_ctx = n_ctx_slot;
  366. slot.n_predict = params.n_predict;
  367. LOG_DEBUG("new slot", {
  368. {"slot_id", slot.id},
  369. {"n_ctx_slot", slot.n_ctx}
  370. });
  371. const int ga_n = params.grp_attn_n;
  372. const int ga_w = params.grp_attn_w;
  373. if (ga_n != 1) {
  374. GGML_ASSERT(ga_n > 0 && "ga_n must be positive"); // NOLINT
  375. GGML_ASSERT(ga_w % ga_n == 0 && "ga_w must be a multiple of ga_n"); // NOLINT
  376. //GGML_ASSERT(n_ctx_train % ga_w == 0 && "n_ctx_train must be a multiple of ga_w"); // NOLINT
  377. //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * ga_n"); // NOLINT
  378. LOG_DEBUG("slot self-extend", {
  379. {"slot_id", slot.id},
  380. {"ga_n", ga_n},
  381. {"ga_w", ga_w}
  382. });
  383. }
  384. slot.ga_i = 0;
  385. slot.ga_n = ga_n;
  386. slot.ga_w = ga_w;
  387. slot.reset();
  388. slots.push_back(slot);
  389. }
  390. default_generation_settings_for_props = get_formated_generation(slots.front());
  391. default_generation_settings_for_props["seed"] = -1;
  392. batch = llama_batch_init(n_ctx, 0, params.n_parallel);
  393. }
  394. std::vector<llama_token> tokenize(const json & json_prompt, bool add_bos) const
  395. {
  396. // TODO: currently, we tokenize using special tokens by default
  397. // this is not always correct (see https://github.com/ggerganov/llama.cpp/pull/4160#issuecomment-1824826216)
  398. // but it's better compared to completely ignoring ChatML and other chat templates
  399. const bool TMP_FORCE_SPECIAL = true;
  400. // If `add_bos` is true, we only add BOS when json_prompt is a string
  401. // or when the first element of the json_prompt array is a string.
  402. std::vector<llama_token> prompt_tokens;
  403. if (json_prompt.is_array())
  404. {
  405. bool first = true;
  406. for (const auto& p : json_prompt)
  407. {
  408. if (p.is_string())
  409. {
  410. auto s = p.template get<std::string>();
  411. std::vector<llama_token> p;
  412. if (first)
  413. {
  414. p = ::llama_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL);
  415. first = false;
  416. }
  417. else
  418. {
  419. p = ::llama_tokenize(ctx, s, false, TMP_FORCE_SPECIAL);
  420. }
  421. prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end());
  422. }
  423. else
  424. {
  425. if (first)
  426. {
  427. first = false;
  428. }
  429. prompt_tokens.push_back(p.template get<llama_token>());
  430. }
  431. }
  432. }
  433. else
  434. {
  435. auto s = json_prompt.template get<std::string>();
  436. prompt_tokens = ::llama_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL);
  437. }
  438. return prompt_tokens;
  439. }
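  // get_slot: return the slot with the requested id if it is free; otherwise (or when
  // id == -1) fall back to the least recently used idle slot, or nullptr if none is free.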
  440. server_slot* get_slot(int id) {
  441. int64_t t_last = ggml_time_us();
  442. server_slot *last_used = nullptr;
  443. for (server_slot & slot : slots)
  444. {
  445. if (slot.id == id && slot.available())
  446. {
  447. return &slot;
  448. }
  449. if (slot.available() && slot.t_last_used < t_last)
  450. {
  451. last_used = &slot;
  452. t_last = slot.t_last_used;
  453. }
  454. }
  455. return last_used;
  456. }
  457. bool launch_slot_with_data(server_slot* &slot, json data) {
  458. slot_params default_params;
  459. llama_sampling_params default_sparams;
  460. slot->params.stream = json_value(data, "stream", false);
  461. slot->params.cache_prompt = json_value(data, "cache_prompt", false);
  462. slot->params.n_predict = json_value(data, "n_predict", default_params.n_predict);
  463. slot->sparams.top_k = json_value(data, "top_k", default_sparams.top_k);
  464. slot->sparams.top_p = json_value(data, "top_p", default_sparams.top_p);
  465. slot->sparams.min_p = json_value(data, "min_p", default_sparams.min_p);
  466. slot->sparams.tfs_z = json_value(data, "tfs_z", default_sparams.tfs_z);
  467. slot->sparams.typical_p = json_value(data, "typical_p", default_sparams.typical_p);
  468. slot->sparams.temp = json_value(data, "temperature", default_sparams.temp);
  469. slot->sparams.dynatemp_range = json_value(data, "dynatemp_range", default_sparams.dynatemp_range);
  470. slot->sparams.dynatemp_exponent = json_value(data, "dynatemp_exponent", default_sparams.dynatemp_exponent);
  471. slot->sparams.penalty_last_n = json_value(data, "repeat_last_n", default_sparams.penalty_last_n);
  472. slot->sparams.penalty_repeat = json_value(data, "repeat_penalty", default_sparams.penalty_repeat);
  473. slot->sparams.penalty_freq = json_value(data, "frequency_penalty", default_sparams.penalty_freq);
  474. slot->sparams.penalty_present = json_value(data, "presence_penalty", default_sparams.penalty_present);
  475. slot->sparams.mirostat = json_value(data, "mirostat", default_sparams.mirostat);
  476. slot->sparams.mirostat_tau = json_value(data, "mirostat_tau", default_sparams.mirostat_tau);
  477. slot->sparams.mirostat_eta = json_value(data, "mirostat_eta", default_sparams.mirostat_eta);
  478. slot->sparams.penalize_nl = json_value(data, "penalize_nl", default_sparams.penalize_nl);
  479. slot->params.n_keep = json_value(data, "n_keep", slot->params.n_keep);
  480. slot->params.seed = json_value(data, "seed", default_params.seed);
  481. slot->sparams.grammar = json_value(data, "grammar", default_sparams.grammar);
  482. slot->sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs);
  483. slot->sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);
  484. if (slot->n_predict > 0 && slot->params.n_predict > slot->n_predict) {
  485. // Might be better to reject the request with a 400 ?
  486. LOG_WARNING("Max tokens to predict exceeds server configuration", {
  487. {"params.n_predict", slot->params.n_predict},
  488. {"slot.n_predict", slot->n_predict},
  489. });
  490. slot->params.n_predict = slot->n_predict;
  491. }
  492. // infill
  493. if (data.count("input_prefix") != 0)
  494. {
  495. slot->params.input_prefix = data["input_prefix"];
  496. }
  497. else
  498. {
  499. slot->params.input_prefix = "";
  500. }
  501. if (data.count("input_suffix") != 0)
  502. {
  503. slot->params.input_suffix = data["input_suffix"];
  504. }
  505. else
  506. {
  507. slot->params.input_suffix = "";
  508. }
  509. if (data.count("prompt") != 0)
  510. {
  511. slot->prompt = data["prompt"];
  512. }
  513. else
  514. {
  515. slot->prompt = "";
  516. }
  517. slot->sparams.penalty_prompt_tokens.clear();
  518. slot->sparams.use_penalty_prompt_tokens = false;
  519. const auto &penalty_prompt = data.find("penalty_prompt");
  520. if (penalty_prompt != data.end())
  521. {
  522. if (penalty_prompt->is_string())
  523. {
  524. const auto penalty_prompt_string = penalty_prompt->get<std::string>();
  525. auto penalty_tokens = llama_tokenize(model, penalty_prompt_string, false);
  526. slot->sparams.penalty_prompt_tokens.swap(penalty_tokens);
  527. if (slot->params.n_predict > 0)
  528. {
  529. slot->sparams.penalty_prompt_tokens.reserve(slot->sparams.penalty_prompt_tokens.size() + slot->params.n_predict);
  530. }
  531. slot->sparams.use_penalty_prompt_tokens = true;
  532. }
  533. else if (penalty_prompt->is_array())
  534. {
  535. const auto n_tokens = penalty_prompt->size();
  536. slot->sparams.penalty_prompt_tokens.reserve(n_tokens + std::max(0, slot->params.n_predict));
  537. const int n_vocab = llama_n_vocab(model);
  538. for (const auto &penalty_token : *penalty_prompt)
  539. {
  540. if (penalty_token.is_number_integer())
  541. {
  542. const auto tok = penalty_token.get<llama_token>();
  543. if (tok >= 0 && tok < n_vocab)
  544. {
  545. slot->sparams.penalty_prompt_tokens.push_back(tok);
  546. }
  547. }
  548. }
  549. slot->sparams.use_penalty_prompt_tokens = true;
  550. }
  551. }
  552. slot->sparams.logit_bias.clear();
  553. if (json_value(data, "ignore_eos", false))
  554. {
  555. slot->sparams.logit_bias[llama_token_eos(model)] = -INFINITY;
  556. }
  557. const auto &logit_bias = data.find("logit_bias");
  558. if (logit_bias != data.end() && logit_bias->is_array())
  559. {
  560. const int n_vocab = llama_n_vocab(model);
  561. for (const auto &el : *logit_bias)
  562. {
  563. if (el.is_array() && el.size() == 2)
  564. {
  565. float bias;
  566. if (el[1].is_number())
  567. {
  568. bias = el[1].get<float>();
  569. }
  570. else if (el[1].is_boolean() && !el[1].get<bool>())
  571. {
  572. bias = -INFINITY;
  573. }
  574. else
  575. {
  576. continue;
  577. }
  578. if (el[0].is_number_integer())
  579. {
  580. llama_token tok = el[0].get<llama_token>();
  581. if (tok >= 0 && tok < n_vocab)
  582. {
  583. slot->sparams.logit_bias[tok] = bias;
  584. }
  585. }
  586. else if (el[0].is_string())
  587. {
  588. auto toks = llama_tokenize(model, el[0].get<std::string>(), false);
  589. for (auto tok : toks)
  590. {
  591. slot->sparams.logit_bias[tok] = bias;
  592. }
  593. }
  594. }
  595. }
  596. }
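  // illustrative request fragment (not from the original source) showing the logit_bias
  // formats accepted above; token ids are arbitrary examples, and `false` maps to -infinity:
  //   "logit_bias": [[15043, 1.5], ["Hello", -0.5], [2, false]]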
  597. slot->params.antiprompt.clear();
  598. const auto &stop = data.find("stop");
  599. if (stop != data.end() && stop->is_array())
  600. {
  601. for (const auto &word : *stop)
  602. {
  603. if (!word.empty())
  604. {
  605. slot->params.antiprompt.push_back(word);
  606. }
  607. }
  608. }
  609. const auto &samplers_sequence = data.find("samplers");
  610. if (samplers_sequence != data.end() && samplers_sequence->is_array())
  611. {
  612. std::vector<std::string> sampler_names;
  613. for (const auto &sampler_name : *samplers_sequence)
  614. {
  615. if (sampler_name.is_string())
  616. {
  617. sampler_names.emplace_back(sampler_name);
  618. }
  619. }
  620. slot->sparams.samplers_sequence = llama_sampling_types_from_names(sampler_names, false);
  621. }
  622. else
  623. {
  624. slot->sparams.samplers_sequence = default_sparams.samplers_sequence;
  625. }
  626. if (multimodal)
  627. {
  628. const auto &images_data = data.find("image_data");
  629. if (images_data != data.end() && images_data->is_array())
  630. {
  631. for (const auto &img : *images_data)
  632. {
  633. const std::vector<uint8_t> image_buffer = base64_decode(img["data"].get<std::string>());
  634. slot_image img_sl;
  635. img_sl.id = img.count("id") != 0 ? img["id"].get<int>() : slot->images.size();
  636. img_sl.img_data = clip_image_u8_init();
  637. if (!clip_image_load_from_bytes(image_buffer.data(), image_buffer.size(), img_sl.img_data))
  638. {
  639. LOG_ERROR("failed to load image", {
  640. {"slot_id", slot->id},
  641. {"img_sl_id", img_sl.id}
  642. });
  643. return false;
  644. }
  645. LOG_VERBOSE("image loaded", {
  646. {"slot_id", slot->id},
  647. {"img_sl_id", img_sl.id}
  648. });
  649. img_sl.request_encode_image = true;
  650. slot->images.push_back(img_sl);
  651. }
  652. // process prompt
  653. // example: system prompt [img-102] user [img-103] describe [img-134] -> [{id: 102, prefix: 'system prompt '}, {id: 103, prefix: ' user '}, {id: 134, prefix: ' describe '}]
  654. if (slot->images.size() > 0 && !slot->prompt.is_array())
  655. {
  656. std::string prompt = slot->prompt.get<std::string>();
  657. size_t pos = 0, begin_prefix = 0;
  658. std::string pattern = "[img-";
  659. while ((pos = prompt.find(pattern, pos)) != std::string::npos) {
  660. size_t end_prefix = pos;
  661. pos += pattern.length();
  662. size_t end_pos = prompt.find(']', pos);
  663. if (end_pos != std::string::npos)
  664. {
  665. std::string image_id = prompt.substr(pos, end_pos - pos);
  666. try
  667. {
  668. int img_id = std::stoi(image_id);
  669. bool found = false;
  670. for (slot_image &img : slot->images)
  671. {
  672. if (img.id == img_id) {
  673. found = true;
  674. img.prefix_prompt = prompt.substr(begin_prefix, end_prefix - begin_prefix);
  675. begin_prefix = end_pos + 1;
  676. break;
  677. }
  678. }
  679. if (!found) {
  680. LOG_TEE("ERROR: Image with id: %i, not found.\n", img_id);
  681. slot->images.clear();
  682. return false;
  683. }
  684. } catch (const std::invalid_argument& e) {
  685. LOG_TEE("Invalid image number id in prompt\n");
  686. slot->images.clear();
  687. return false;
  688. }
  689. }
  690. }
  691. slot->prompt = "";
  692. slot->params.input_suffix = prompt.substr(begin_prefix);
  693. slot->params.cache_prompt = false; // prompt caching is not supported for multimodal prompts
  694. }
  695. }
  696. }
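  // illustrative multimodal request (hypothetical values) matching the parsing above:
  // the prompt references images via the "[img-<id>]" pattern and the images themselves
  // are supplied as base64 data in "image_data", e.g.
  //   { "prompt": "USER: [img-12] describe the image\nASSISTANT:",
  //     "image_data": [{ "id": 12, "data": "<base64 bytes>" }] }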
  697. if (slot->ctx_sampling != nullptr)
  698. {
  699. llama_sampling_free(slot->ctx_sampling);
  700. }
  701. slot->ctx_sampling = llama_sampling_init(slot->sparams);
  702. llama_set_rng_seed(ctx, slot->params.seed);
  703. slot->command = LOAD_PROMPT;
  704. all_slots_are_idle = false;
  705. LOG_DEBUG("slot is processing task", {
  706. {"slot_id", slot->id},
  707. {"task_id", slot->task_id},
  708. });
  709. return true;
  710. }
  711. void kv_cache_clear() {
  712. // clear the entire KV cache
  713. llama_kv_cache_clear(ctx);
  714. clean_kv_cache = false;
  715. }
  716. void system_prompt_update() {
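  // the system prompt is decoded once into sequence 0, and its KV cache entries are then
  // copied to every parallel sequence so that each slot starts from the shared prefix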
  717. kv_cache_clear();
  718. system_tokens.clear();
  719. if (!system_prompt.empty()) {
  720. system_tokens = ::llama_tokenize(ctx, system_prompt, add_bos_token);
  721. llama_batch_clear(batch);
  722. for (int i = 0; i < (int)system_tokens.size(); ++i)
  723. {
  724. llama_batch_add(batch, system_tokens[i], i, { 0 }, false);
  725. }
  726. for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += params.n_batch)
  727. {
  728. const int32_t n_tokens = std::min(params.n_batch, (int32_t) (batch.n_tokens - i));
  729. llama_batch batch_view = {
  730. n_tokens,
  731. batch.token + i,
  732. nullptr,
  733. batch.pos + i,
  734. batch.n_seq_id + i,
  735. batch.seq_id + i,
  736. batch.logits + i,
  737. 0, 0, 0, // unused
  738. };
  739. if (llama_decode(ctx, batch_view) != 0)
  740. {
  741. LOG_TEE("%s: llama_decode() failed\n", __func__);
  742. return;
  743. }
  744. }
  745. // assign the system KV cache to all parallel sequences
  746. for (int32_t i = 1; i < params.n_parallel; ++i)
  747. {
  748. llama_kv_cache_seq_cp(ctx, 0, i, 0, system_tokens.size());
  749. }
  750. }
  751. LOG_TEE("system prompt updated\n");
  752. system_need_update = false;
  753. }
  754. void system_prompt_notify() {
  755. // release all slots
  756. for (server_slot &slot : slots)
  757. {
  758. slot.release();
  759. }
  760. system_need_update = true;
  761. }
  762. void system_prompt_process(const json &sys_props) {
  763. system_prompt = sys_props.value("prompt", "");
  764. name_user = sys_props.value("anti_prompt", "");
  765. name_assistant = sys_props.value("assistant_name", "");
  766. system_prompt_notify();
  767. }
  768. static size_t find_stopping_strings(const std::string &text, const size_t last_token_size,
  769. const stop_type type, server_slot &slot)
  770. {
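  // STOP_FULL   : look for a complete stop word near the end of the generated text
  // STOP_PARTIAL: check whether the text ends with a prefix of a stop word, so streaming
  //               can hold back output that might still turn into a stop word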
  771. size_t stop_pos = std::string::npos;
  772. for (const std::string &word : slot.params.antiprompt)
  773. {
  774. size_t pos;
  775. if (type == STOP_FULL)
  776. {
  777. const size_t tmp = word.size() + last_token_size;
  778. const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0;
  779. pos = text.find(word, from_pos);
  780. }
  781. else
  782. {
  783. pos = find_partial_stop_string(word, text);
  784. }
  785. if (pos != std::string::npos &&
  786. (stop_pos == std::string::npos || pos < stop_pos))
  787. {
  788. if (type == STOP_FULL)
  789. {
  790. slot.stopped_word = true;
  791. slot.stopping_word = word;
  792. slot.has_next_token = false;
  793. }
  794. stop_pos = pos;
  795. }
  796. }
  797. return stop_pos;
  798. }
  799. bool process_token(completion_token_output &result, server_slot &slot) {
  800. // remember which tokens were sampled - used for repetition penalties during sampling
  801. const std::string token_str = llama_token_to_piece(ctx, result.tok);
  802. slot.sampled = result.tok;
  803. // append the token to the generated text; stop words are searched for and stripped below
  804. slot.generated_text += token_str;
  805. slot.has_next_token = true;
  806. if (slot.ctx_sampling->params.use_penalty_prompt_tokens && result.tok != -1)
  807. {
  808. // we can change penalty_prompt_tokens because it is always created from scratch each request
  809. slot.ctx_sampling->params.penalty_prompt_tokens.push_back(result.tok);
  810. }
  811. // check if there is incomplete UTF-8 character at the end
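  // walk backwards over at most 4 bytes: skip continuation bytes (10xxxxxx) until the lead
  // byte is found, then compare the sequence length encoded in the lead byte against the
  // number of trailing bytes actually present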
  812. bool incomplete = false;
  813. for (unsigned i = 1; i < 5 && i <= slot.generated_text.size(); ++i)
  814. {
  815. unsigned char c = slot.generated_text[slot.generated_text.size() - i];
  816. if ((c & 0xC0) == 0x80)
  817. {
  818. // continuation byte: 10xxxxxx
  819. continue;
  820. }
  821. if ((c & 0xE0) == 0xC0)
  822. {
  823. // 2-byte character: 110xxxxx ...
  824. incomplete = i < 2;
  825. }
  826. else if ((c & 0xF0) == 0xE0)
  827. {
  828. // 3-byte character: 1110xxxx ...
  829. incomplete = i < 3;
  830. }
  831. else if ((c & 0xF8) == 0xF0)
  832. {
  833. // 4-byte character: 11110xxx ...
  834. incomplete = i < 4;
  835. }
  836. // else 1-byte character or invalid byte
  837. break;
  838. }
  839. if (!incomplete)
  840. {
  841. size_t pos = std::min(slot.n_sent_text, slot.generated_text.size());
  842. const std::string str_test = slot.generated_text.substr(pos);
  843. bool is_stop_full = false;
  844. size_t stop_pos = find_stopping_strings(str_test, token_str.size(), STOP_FULL, slot);
  845. if (stop_pos != std::string::npos)
  846. {
  847. is_stop_full = true;
  848. slot.generated_text.erase(
  849. slot.generated_text.begin() + pos + stop_pos,
  850. slot.generated_text.end());
  851. pos = std::min(slot.n_sent_text, slot.generated_text.size());
  852. }
  853. else
  854. {
  855. is_stop_full = false;
  856. stop_pos = find_stopping_strings(str_test, token_str.size(), STOP_PARTIAL, slot);
  857. }
  858. // check if there is any token to predict
  859. if (stop_pos == std::string::npos || (!slot.has_next_token && !is_stop_full && stop_pos > 0))
  860. {
  861. // do not send the stop word in the response
  862. result.text_to_send = slot.generated_text.substr(pos, std::string::npos);
  863. slot.n_sent_text += result.text_to_send.size();
  864. // add the token to slot queue and cache
  865. }
  866. if (slot.params.stream)
  867. {
  868. send_partial_response(slot, result);
  869. }
  870. }
  871. slot.add_token_string(result);
  872. if (incomplete)
  873. {
  874. slot.has_next_token = true;
  875. }
  876. // check the limits
  877. if (slot.n_decoded > 0 && slot.has_next_token && !slot.has_budget(params))
  878. {
  879. slot.stopped_limit = true;
  880. slot.has_next_token = false;
  881. }
  882. if (!slot.cache_tokens.empty() && llama_token_is_eog(model, result.tok))
  883. {
  884. slot.stopped_eos = true;
  885. slot.has_next_token = false;
  886. LOG_VERBOSE("eos token found", {});
  887. }
  888. LOG_VERBOSE("next token", {
  889. {"token", result.tok},
  890. {"token_text", tokens_to_output_formatted_string(ctx, result.tok)},
  891. {"has_next_token", slot.has_next_token},
  892. {"n_remain", slot.n_remaining},
  893. {"num_tokens_predicted", slot.n_decoded},
  894. {"stopped_eos", slot.stopped_eos},
  895. {"stopped_word", slot.stopped_word},
  896. {"stopped_limit", slot.stopped_limit},
  897. {"stopping_word", slot.stopping_word},
  898. });
  899. return slot.has_next_token; // continue
  900. }
  901. bool process_images(server_slot &slot) const
  902. {
  903. for (slot_image &img : slot.images)
  904. {
  905. if (!img.request_encode_image)
  906. {
  907. continue;
  908. }
  909. if (!llava_image_embed_make_with_clip_img(clp_ctx, params.n_threads, img.img_data, &img.image_embedding, &img.image_tokens)) {
  910. LOG_TEE("Error processing the given image");
  911. return false;
  912. }
  913. img.request_encode_image = false;
  914. }
  915. return slot.images.size() > 0;
  916. }
  917. void send_error(task_server& task, const std::string &error)
  918. {
  919. LOG_TEE("task %i - error: %s\n", task.id, error.c_str());
  920. task_result res;
  921. res.id = task.id;
  922. res.multitask_id = task.multitask_id;
  923. res.stop = false;
  924. res.error = true;
  925. res.result_json = { { "content", error } };
  926. queue_results.send(res);
  927. }
  928. json get_formated_generation(server_slot &slot)
  929. {
  930. const auto eos_bias = slot.sparams.logit_bias.find(llama_token_eos(model));
  931. const bool ignore_eos = eos_bias != slot.sparams.logit_bias.end() &&
  932. eos_bias->second < 0.0f && std::isinf(eos_bias->second);
  933. std::vector<std::string> samplers_sequence;
  934. for (const auto &sampler_type : slot.sparams.samplers_sequence)
  935. {
  936. samplers_sequence.emplace_back(llama_sampling_type_to_str(sampler_type));
  937. }
  938. return json {
  939. {"n_ctx", slot.n_ctx},
  940. {"n_predict", slot.n_predict},
  941. {"model", params.model_alias},
  942. {"seed", slot.params.seed},
  943. {"temperature", slot.sparams.temp},
  944. {"dynatemp_range", slot.sparams.dynatemp_range},
  945. {"dynatemp_exponent", slot.sparams.dynatemp_exponent},
  946. {"top_k", slot.sparams.top_k},
  947. {"top_p", slot.sparams.top_p},
  948. {"min_p", slot.sparams.min_p},
  949. {"tfs_z", slot.sparams.tfs_z},
  950. {"typical_p", slot.sparams.typical_p},
  951. {"repeat_last_n", slot.sparams.penalty_last_n},
  952. {"repeat_penalty", slot.sparams.penalty_repeat},
  953. {"presence_penalty", slot.sparams.penalty_present},
  954. {"frequency_penalty", slot.sparams.penalty_freq},
  955. {"penalty_prompt_tokens", slot.sparams.penalty_prompt_tokens},
  956. {"use_penalty_prompt_tokens", slot.sparams.use_penalty_prompt_tokens},
  957. {"mirostat", slot.sparams.mirostat},
  958. {"mirostat_tau", slot.sparams.mirostat_tau},
  959. {"mirostat_eta", slot.sparams.mirostat_eta},
  960. {"penalize_nl", slot.sparams.penalize_nl},
  961. {"stop", slot.params.antiprompt},
  962. {"n_predict", slot.params.n_predict},
  963. {"n_keep", params.n_keep},
  964. {"ignore_eos", ignore_eos},
  965. {"stream", slot.params.stream},
  966. {"logit_bias", slot.sparams.logit_bias},
  967. {"n_probs", slot.sparams.n_probs},
  968. {"min_keep", slot.sparams.min_keep},
  969. {"grammar", slot.sparams.grammar},
  970. {"samplers", samplers_sequence}
  971. };
  972. }
  973. void send_partial_response(server_slot &slot, completion_token_output tkn)
  974. {
  975. task_result res;
  976. res.id = slot.task_id;
  977. res.multitask_id = slot.multitask_id;
  978. res.error = false;
  979. res.stop = false;
  980. res.result_json = json
  981. {
  982. {"stop", false},
  983. {"slot_id", slot.id},
  984. {"multimodal", multimodal}
  985. };
  986. if (!llama_token_is_eog(model, tkn.tok)) {
  987. res.result_json["content"] = tkn.text_to_send;
  988. }
  989. if (slot.sparams.n_probs > 0)
  990. {
  991. std::vector<completion_token_output> probs_output = {};
  992. const std::vector<llama_token> to_send_toks = llama_tokenize(ctx, tkn.text_to_send, false);
  993. size_t probs_pos = std::min(slot.n_sent_token_probs, slot.generated_token_probs.size());
  994. size_t probs_stop_pos = std::min(slot.n_sent_token_probs + to_send_toks.size(), slot.generated_token_probs.size());
  995. if (probs_pos < probs_stop_pos)
  996. {
  997. probs_output = std::vector<completion_token_output>(slot.generated_token_probs.begin() + probs_pos, slot.generated_token_probs.begin() + probs_stop_pos);
  998. }
  999. slot.n_sent_token_probs = probs_stop_pos;
  1000. res.result_json["completion_probabilities"] = probs_vector_to_json(ctx, probs_output);
  1001. }
  1002. queue_results.send(res);
  1003. }
  1004. void send_final_response(server_slot &slot)
  1005. {
  1006. task_result res;
  1007. res.id = slot.task_id;
  1008. res.multitask_id = slot.multitask_id;
  1009. res.error = false;
  1010. res.stop = true;
  1011. res.result_json = json
  1012. {
  1013. {"content", !slot.params.stream ? slot.generated_text : ""},
  1014. {"slot_id", slot.id},
  1015. {"stop", true},
  1016. {"model", params.model_alias},
  1017. {"tokens_predicted", slot.n_decoded},
  1018. {"tokens_evaluated", slot.n_prompt_tokens},
  1019. {"truncated", slot.truncated},
  1020. {"stopped_eos", slot.stopped_eos},
  1021. {"stopped_word", slot.stopped_word},
  1022. {"stopped_limit", slot.stopped_limit},
  1023. {"stopping_word", slot.stopping_word},
  1024. {"tokens_cached", slot.n_past},
  1025. {"timings", slot.get_formated_timings()}
  1026. };
  1027. if (slot.sparams.n_probs > 0)
  1028. {
  1029. std::vector<completion_token_output> probs = {};
  1030. if (!slot.params.stream && slot.stopped_word)
  1031. {
  1032. const std::vector<llama_token> stop_word_toks = llama_tokenize(ctx, slot.stopping_word, false);
  1033. probs = std::vector<completion_token_output>(slot.generated_token_probs.begin(), slot.generated_token_probs.end() - stop_word_toks.size());
  1034. }
  1035. else
  1036. {
  1037. probs = std::vector<completion_token_output>(
  1038. slot.generated_token_probs.begin(),
  1039. slot.generated_token_probs.end());
  1040. }
  1041. res.result_json["completion_probabilities"] = probs_vector_to_json(ctx, probs);
  1042. }
  1043. queue_results.send(res);
  1044. }
  1045. void send_embedding(server_slot & slot, const llama_batch & batch)
  1046. {
  1047. task_result res;
  1048. res.id = slot.task_id;
  1049. res.multitask_id = slot.multitask_id;
  1050. res.error = false;
  1051. res.stop = true;
  1052. const int n_embd = llama_n_embd(model);
  1053. if (!params.embedding)
  1054. {
  1055. LOG_WARNING("embedding disabled", {{"params.embedding", params.embedding}});
  1056. res.result_json = json
  1057. {
  1058. {"embedding", std::vector<float>(n_embd, 0.0f)},
  1059. };
  1060. }
  1061. else
  1062. {
  1063. for (int i = 0; i < batch.n_tokens; ++i) {
  1064. if (!batch.logits[i] || batch.seq_id[i][0] != slot.id) {
  1065. continue;
  1066. }
  1067. const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
  1068. if (embd == NULL) {
  1069. embd = llama_get_embeddings_ith(ctx, i);
  1070. if (embd == NULL) {
  1071. LOG_ERROR("failed to get embeddings for token", {{"token", batch.token[i]}, {"seq_id", batch.seq_id[i][0]}});
  1072. res.result_json = json
  1073. {
  1074. {"embedding", std::vector<float>(n_embd, 0.0f)},
  1075. };
  1076. continue;
  1077. }
  1078. }
  1079. res.result_json = json
  1080. {
  1081. {"embedding", std::vector<float>(embd, embd + n_embd)},
  1082. };
  1083. }
  1084. }
  1085. queue_results.send(res);
  1086. }
  1087. void request_completion(int task_id, json data, bool infill, bool embedding, int multitask_id)
  1088. {
  1089. task_server task;
  1090. task.id = task_id;
  1091. task.target_id = 0;
  1092. task.data = std::move(data);
  1093. task.infill_mode = infill;
  1094. task.embedding_mode = embedding;
  1095. task.type = TASK_TYPE_COMPLETION;
  1096. task.multitask_id = multitask_id;
  1097. // when a completion task's prompt array is not a singleton, we split it into multiple requests
  1098. // otherwise, it's a single-prompt task and we queue it directly
  1099. // if there are numbers in the prompt array, it will be treated as an array of tokens
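  // illustrative prompts (hypothetical values): an array of strings such as
  //   ["first prompt", "second prompt"]
  // is split by split_multiprompt_task() into one subtask per string, while an array of
  // token ids such as [1, 15043, 3186] is kept as a single task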
  1100. if (task.data.count("prompt") != 0 && task.data.at("prompt").size() > 1) {
  1101. bool numbers = false;
  1102. for (const auto& e : task.data.at("prompt")) {
  1103. if (e.is_number()) {
  1104. numbers = true;
  1105. break;
  1106. }
  1107. }
  1108. // NOTE: split_multiprompt_task() does not handle a mix of strings and numbers,
  1109. // it will completely stall the server. I don't know where the bug for this is.
  1110. //
  1111. // if there are numbers, it needs to be treated like a single prompt,
  1112. // queue_tasks handles a mix of strings and numbers just fine.
  1113. if (numbers) {
  1114. queue_tasks.post(task);
  1115. } else {
  1116. split_multiprompt_task(task_id, task);
  1117. }
  1118. } else {
  1119. // an empty prompt can make the slot misbehave
  1120. if (task.data.contains("prompt") && task.data["prompt"].is_string() && task.data["prompt"].get<std::string>().empty()) {
  1121. task.data["prompt"] = " "; // add a space so that we have one token
  1122. }
  1123. queue_tasks.post(task);
  1124. }
  1125. }
  1126. // process a prompt that references multiple images
  1127. bool ingest_images(server_slot &slot, int n_batch)
  1128. {
  1129. int image_idx = 0;
  1130. while (image_idx < (int) slot.images.size())
  1131. {
  1132. slot_image &img = slot.images[image_idx];
  1133. // process prefix prompt
  1134. for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch)
  1135. {
  1136. const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i));
  1137. llama_batch batch_view = {
  1138. n_tokens,
  1139. batch.token + i,
  1140. nullptr,
  1141. batch.pos + i,
  1142. batch.n_seq_id + i,
  1143. batch.seq_id + i,
  1144. batch.logits + i,
  1145. 0, 0, 0, // unused
  1146. };
  1147. if (llama_decode(ctx, batch_view))
  1148. {
  1149. LOG_TEE("%s : failed to eval\n", __func__);
  1150. return false;
  1151. }
  1152. }
  1153. // process image with llm
  1154. for (int i = 0; i < img.image_tokens; i += n_batch)
  1155. {
  1156. int n_eval = img.image_tokens - i;
  1157. if (n_eval > n_batch)
  1158. {
  1159. n_eval = n_batch;
  1160. }
  1161. const int n_embd = llama_n_embd(model);
  1162. llama_batch batch_img = {
  1163. n_eval,
  1164. nullptr,
  1165. (img.image_embedding + i * n_embd),
  1166. nullptr,
  1167. nullptr,
  1168. nullptr,
  1169. nullptr,
  1170. slot.n_past,
  1171. 1, 0
  1172. };
  1173. if (llama_decode(ctx, batch_img))
  1174. {
  1175. LOG_TEE("%s : failed to eval image\n", __func__);
  1176. return false;
  1177. }
  1178. slot.n_past += n_eval;
  1179. }
  1180. image_idx++;
  1181. llama_batch_clear(batch);
  1182. // append prefix of next image
  1183. const auto json_prompt = (image_idx >= (int) slot.images.size()) ?
  1184. slot.params.input_suffix : // no more images, then process suffix prompt
  1185. (json)(slot.images[image_idx].prefix_prompt);
  1186. std::vector<llama_token> append_tokens = tokenize(json_prompt, false); // has next image
  1187. for (int i = 0; i < (int) append_tokens.size(); ++i)
  1188. {
  1189. llama_batch_add(batch, append_tokens[i], system_tokens.size() + slot.n_past, { slot.id }, true);
  1190. slot.n_past += 1;
  1191. }
  1192. }
  1193. return true;
  1194. }
  1195. void request_cancel(int task_id)
  1196. {
  1197. task_server task;
  1198. task.type = TASK_TYPE_CANCEL;
  1199. task.target_id = task_id;
  1200. queue_tasks.post(task);
  1201. }
  1202. void split_multiprompt_task(int multitask_id, task_server& multiprompt_task)
  1203. {
  1204. int prompt_count = multiprompt_task.data.at("prompt").size();
  1205. if (prompt_count <= 1) {
  1206. send_error(multiprompt_task, "error while handling multiple prompts");
  1207. return;
  1208. }
  1209. // generate all the ID for subtask
  1210. std::vector<int> subtask_ids(prompt_count);
  1211. for (int i = 0; i < prompt_count; i++)
  1212. {
  1213. subtask_ids[i] = queue_tasks.get_new_id();
  1214. }
  1215. // queue up the multitask so we can track its subtask progression
  1216. queue_tasks.add_multitask(multitask_id, subtask_ids);
  1217. // add subtasks
  1218. for (int i = 0; i < prompt_count; i++)
  1219. {
  1220. json subtask_data = multiprompt_task.data;
  1221. subtask_data["prompt"] = subtask_data["prompt"][i];
  1222. // subtasks inherit everything else (infill mode, embedding mode, etc.)
  1223. request_completion(subtask_ids[i], subtask_data, multiprompt_task.infill_mode, multiprompt_task.embedding_mode, multitask_id);
  1224. }
  1225. }
  1226. void process_single_task(task_server& task)
  1227. {
  1228. switch (task.type)
  1229. {
  1230. case TASK_TYPE_COMPLETION: {
  1231. server_slot *slot = get_slot(json_value(task.data, "slot_id", -1));
  1232. if (slot == nullptr)
  1233. {
  1234. // if no slot is available, we defer this task for processing later
  1235. LOG_VERBOSE("no slot is available", {{"task_id", task.id}});
  1236. queue_tasks.defer(task);
  1237. break;
  1238. }
  1239. if (task.data.contains("system_prompt"))
  1240. {
  1241. if (!all_slots_are_idle) {
  1242. send_error(task, "system prompt can only be updated when all slots are idle");
  1243. break;
  1244. }
  1245. system_prompt_process(task.data["system_prompt"]);
  1246. // reset cache_tokens for all slots
  1247. for (server_slot &slot : slots)
  1248. {
  1249. slot.cache_tokens.clear();
  1250. slot.n_past = 0;
  1251. slot.n_past_se = 0;
  1252. }
  1253. }
  1254. slot->reset();
  1255. slot->infill = task.infill_mode;
  1256. slot->embedding = task.embedding_mode;
  1257. slot->task_id = task.id;
  1258. slot->multitask_id = task.multitask_id;
  1259. if (!launch_slot_with_data(slot, task.data))
  1260. {
  1261. // send error result
  1262. send_error(task, "internal_error");
  1263. break;
  1264. }
  1265. } break;
  1266. case TASK_TYPE_CANCEL: { // release slot linked with the task id
  1267. for (auto & slot : slots)
  1268. {
  1269. if (slot.task_id == task.target_id)
  1270. {
  1271. slot.release();
  1272. break;
  1273. }
  1274. }
  1275. } break;
  1276. case TASK_TYPE_NEXT_RESPONSE: {
  1277. // do nothing
  1278. } break;
  1279. case TASK_TYPE_METRICS: {
  1280. json slots_data = json::array();
  1281. int n_idle_slots = 0;
  1282. int n_processing_slots = 0;
  1283. for (server_slot &slot: slots) {
  1284. json slot_data = get_formated_generation(slot);
  1285. slot_data["id"] = slot.id;
  1286. slot_data["task_id"] = slot.task_id;
  1287. slot_data["state"] = slot.state;
  1288. slot_data["prompt"] = slot.prompt;
  1289. slot_data["next_token"] = {
  1290. {"has_next_token", slot.has_next_token},
  1291. {"n_remain", slot.n_remaining},
  1292. {"num_tokens_predicted", slot.n_decoded},
  1293. {"stopped_eos", slot.stopped_eos},
  1294. {"stopped_word", slot.stopped_word},
  1295. {"stopped_limit", slot.stopped_limit},
  1296. {"stopping_word", slot.stopping_word},
  1297. };
  1298. if (slot_data["state"] == IDLE) {
  1299. n_idle_slots++;
  1300. } else {
  1301. n_processing_slots++;
  1302. }
  1303. slots_data.push_back(slot_data);
  1304. }
  1305. LOG_DEBUG("slot data", {
  1306. {"task_id", task.id},
  1307. {"n_idle_slots", n_idle_slots},
  1308. {"n_processing_slots", n_processing_slots}
  1309. });
  1310. LOG_VERBOSE("slot data", {
  1311. {"task_id", task.id},
  1312. {"n_idle_slots", n_idle_slots},
  1313. {"n_processing_slots", n_processing_slots},
  1314. {"slots", slots_data}
  1315. });
  1316. task_result res;
  1317. res.id = task.id;
  1318. res.multitask_id = task.multitask_id;
  1319. res.stop = true;
  1320. res.error = false;
  1321. res.result_json = {
  1322. { "idle", n_idle_slots },
  1323. { "processing", n_processing_slots },
  1324. { "deferred", queue_tasks.queue_tasks_deferred.size() },
  1325. { "n_prompt_tokens_processed_total", metrics.n_prompt_tokens_processed_total},
  1326. { "n_tokens_predicted_total", metrics.n_tokens_predicted_total},
  1327. { "n_prompt_tokens_processed", metrics.n_prompt_tokens_processed},
  1328. { "t_prompt_processing", metrics.t_prompt_processing},
  1329. { "n_tokens_predicted", metrics.n_tokens_predicted},
  1330. { "t_tokens_generation", metrics.t_tokens_generation},
  1331. { "kv_cache_tokens_count", llama_get_kv_cache_token_count(ctx)},
  1332. { "kv_cache_used_cells", llama_get_kv_cache_used_cells(ctx)},
  1333. { "slots", slots_data },
  1334. };
  1335. metrics.reset_bucket();
  1336. queue_results.send(res);
  1337. } break;
  1338. }
  1339. }
  1340. void on_finish_multitask(task_multi& multitask)
  1341. {
  1342. // all subtasks done == multitask is done
  1343. task_result result;
  1344. result.id = multitask.id;
  1345. result.stop = true;
  1346. result.error = false;
  1347. // collect json results into one json result
  1348. std::vector<json> result_jsons;
  1349. for (auto& subres : multitask.results)
  1350. {
  1351. result_jsons.push_back(subres.result_json);
1352. result.error = result.error || subres.error; // report an error if any subtask failed
  1353. }
  1354. result.result_json = json{ { "results", result_jsons } };
  1355. queue_results.send(result);
  1356. }
  1357. bool update_slots() {
  1358. if (system_need_update)
  1359. {
  1360. LOG_DEBUG("updating system prompt", {});
  1361. system_prompt_update();
  1362. }
  1363. llama_batch_clear(batch);
  1364. if (all_slots_are_idle)
  1365. {
  1366. if (system_prompt.empty() && clean_kv_cache)
  1367. {
  1368. LOG_DEBUG("all slots are idle and system prompt is empty, clear the KV cache", {});
  1369. kv_cache_clear();
  1370. }
  1371. return true;
  1372. }
  1373. LOG_VERBOSE("posting NEXT_RESPONSE", {});
  1374. task_server task;
  1375. task.type = TASK_TYPE_NEXT_RESPONSE;
  1376. task.target_id = -1;
  1377. queue_tasks.post(task);
  1378. for (server_slot &slot : slots)
  1379. {
  1380. if (slot.ga_n == 1)
  1381. {
  1382. if (slot.is_processing() && system_tokens.size() + slot.cache_tokens.size() >= (size_t) slot.n_ctx)
  1383. {
  1384. // Shift context
  1385. const int n_keep = slot.params.n_keep + add_bos_token;
  1386. const int n_left = (int) system_tokens.size() + slot.n_past - n_keep;
  1387. const int n_discard = n_left / 2;
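// Roughly half of the non-kept context is discarded and the rest is shifted back to make room.
// Illustrative numbers (not defaults): with no system prompt, n_past = 4096 and n_keep = 257,
// n_left = 3839 and n_discard = 1919, so cache positions [257, 2176) are removed and
// [2176, 4096) slide back by 1919 positions.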
  1388. LOG_DEBUG("slot context shift", {
  1389. {"slot_id", slot.id},
  1390. {"task_id", slot.task_id},
  1391. {"n_keep", n_keep},
  1392. {"n_left", n_left},
  1393. {"n_discard", n_discard},
  1394. {"n_ctx", n_ctx},
  1395. {"n_past", slot.n_past},
  1396. {"n_system_tokens", system_tokens.size()},
  1397. {"n_cache_tokens", slot.cache_tokens.size()}
  1398. });
  1399. llama_kv_cache_seq_rm (ctx, slot.id, n_keep , n_keep + n_discard);
  1400. llama_kv_cache_seq_add(ctx, slot.id, n_keep + n_discard, system_tokens.size() + slot.n_past, -n_discard);
  1401. for (size_t i = n_keep + n_discard; i < slot.cache_tokens.size(); i++)
  1402. {
  1403. slot.cache_tokens[i - n_discard] = slot.cache_tokens[i];
  1404. }
  1405. slot.cache_tokens.resize(slot.cache_tokens.size() - n_discard);
  1406. slot.n_past -= n_discard;
  1407. slot.truncated = true;
  1408. }
  1409. }
  1410. }
  1411. // decode any currently ongoing sequences
  1412. LOG_VERBOSE("decoding ongoing sequences", {});
  1413. for (auto & slot : slots)
  1414. {
  1415. // release the slot
  1416. if (slot.command == RELEASE)
  1417. {
  1418. slot.state = IDLE;
  1419. slot.command = NONE;
  1420. slot.t_last_used = ggml_time_us();
  1421. LOG_DEBUG("slot released", {
  1422. {"slot_id", slot.id},
  1423. {"task_id", slot.task_id},
  1424. {"n_ctx", n_ctx},
  1425. {"n_past", slot.n_past},
  1426. {"n_system_tokens", system_tokens.size()},
  1427. {"n_cache_tokens", slot.cache_tokens.size()},
  1428. {"truncated", slot.truncated}
  1429. });
  1430. queue_tasks.notify_slot_changed();
  1431. continue;
  1432. }
  1433. if (slot.state == IDLE)
  1434. {
  1435. continue;
  1436. }
  1437. slot.i_batch = batch.n_tokens;
  1438. const int32_t slot_npast = slot.n_past_se > 0 ? slot.n_past_se : slot.n_past;
  1439. // TODO: we always have to take into account the "system_tokens"
  1440. // this is not great and needs to be improved somehow
  1441. llama_batch_add(batch, slot.sampled, system_tokens.size() + slot_npast, { slot.id }, true);
  1442. slot.n_past += 1;
  1443. }
  1444. // process in chunks of params.n_batch
  1445. int32_t n_batch = params.n_batch;
  1446. // assign workload to the slots
  1447. if (params.cont_batching || batch.n_tokens == 0)
  1448. {
  1449. for (auto & slot : slots)
  1450. {
  1451. const bool has_prompt = slot.prompt.is_array() || (slot.prompt.is_string() && !slot.prompt.get<std::string>().empty()) || !slot.images.empty();
  1452. // empty prompt passed -> release the slot and send empty response
  1453. // note: infill mode allows empty prompt
  1454. if (slot.state == IDLE && slot.command == LOAD_PROMPT && !has_prompt && !slot.infill)
  1455. {
  1456. slot.release();
  1457. slot.print_timings();
  1458. send_final_response(slot);
  1459. continue;
  1460. }
1461. // need to process the prompt
  1462. if (slot.state == IDLE && slot.command == LOAD_PROMPT)
  1463. {
  1464. slot.state = PROCESSING;
  1465. slot.command = NONE;
  1466. std::vector<llama_token> prompt_tokens;
  1467. slot.t_start_process_prompt = ggml_time_us();
  1468. slot.t_start_genereration = 0;
  1469. if (slot.infill)
  1470. {
  1471. bool suff_rm_leading_spc = true;
  1472. if (params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1)
  1473. {
  1474. params.input_suffix.erase(0, 1);
  1475. suff_rm_leading_spc = false;
  1476. }
  1477. auto prefix_tokens = tokenize(slot.params.input_prefix, false);
  1478. auto suffix_tokens = tokenize(slot.params.input_suffix, false);
  1479. const int space_token = 29871; // TODO: this should not be hardcoded
  1480. if (suff_rm_leading_spc && !suffix_tokens.empty() && suffix_tokens[0] == space_token) {
  1481. suffix_tokens.erase(suffix_tokens.begin());
  1482. }
  1483. prefix_tokens.insert(prefix_tokens.begin(), llama_token_prefix(model));
  1484. prefix_tokens.insert(prefix_tokens.begin(), llama_token_bos(model)); // always add BOS
  1485. prefix_tokens.insert(prefix_tokens.end(), llama_token_suffix(model));
  1486. prefix_tokens.insert(prefix_tokens.end(), suffix_tokens.begin(), suffix_tokens.end());
  1487. prefix_tokens.push_back(llama_token_middle(model));
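// Resulting infill layout: [BOS] [PRE] prefix tokens [SUF] suffix tokens [MID],
// asking the model to generate the "middle" between prefix and suffix.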
  1488. prompt_tokens = prefix_tokens;
  1489. }
  1490. else
  1491. {
1492. prompt_tokens = tokenize(slot.prompt, system_prompt.empty() && add_bos_token); // add BOS only if there is no system prompt
  1493. }
  1494. slot.n_prompt_tokens = prompt_tokens.size();
  1495. if (slot.params.n_keep < 0)
  1496. {
  1497. slot.params.n_keep = slot.n_prompt_tokens;
  1498. }
  1499. slot.params.n_keep = std::min(slot.n_ctx - 4, slot.params.n_keep);
1500. // if the input prompt is too big, truncate it (only possible when group attention / self-extend is disabled)
  1501. if (slot.ga_n == 1 && slot.n_prompt_tokens >= slot.n_ctx)
  1502. {
  1503. const int n_left = slot.n_ctx - slot.params.n_keep;
  1504. const int n_block_size = n_left / 2;
  1505. const int erased_blocks = (slot.n_prompt_tokens - slot.params.n_keep - n_block_size) / n_block_size;
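// Keep the first n_keep tokens, drop erased_blocks whole blocks of n_block_size right after them,
// and keep the tail. Illustrative numbers (not defaults): n_ctx = 2048, n_keep = 256,
// n_prompt_tokens = 4000 -> n_left = 1792, n_block_size = 896, erased_blocks = 3,
// leaving 256 + 1056 = 1312 tokens, which fits in the context.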
  1506. std::vector<llama_token> new_tokens(
  1507. prompt_tokens.begin(),
  1508. prompt_tokens.begin() + slot.params.n_keep);
  1509. new_tokens.insert(
  1510. new_tokens.end(),
  1511. prompt_tokens.begin() + slot.params.n_keep + erased_blocks * n_block_size,
  1512. prompt_tokens.end());
  1513. LOG_VERBOSE("input truncated", {
  1514. {"n_ctx", slot.n_ctx},
  1515. {"n_keep", slot.params.n_keep},
  1516. {"n_left", n_left},
  1517. {"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
  1518. });
  1519. slot.truncated = true;
  1520. prompt_tokens = new_tokens;
  1521. slot.n_prompt_tokens = prompt_tokens.size();
  1522. GGML_ASSERT(slot.n_prompt_tokens < slot.n_ctx);
  1523. }
  1524. if (!slot.params.cache_prompt)
  1525. {
  1526. llama_sampling_reset(slot.ctx_sampling);
  1527. slot.n_past = 0;
  1528. slot.n_past_se = 0;
  1529. slot.ga_i = 0;
  1530. slot.n_prompt_tokens_processed = slot.n_prompt_tokens;
  1531. }
  1532. else
  1533. {
  1534. // push the prompt into the sampling context (do not apply grammar)
  1535. for (auto &token : prompt_tokens)
  1536. {
  1537. llama_sampling_accept(slot.ctx_sampling, ctx, token, false);
  1538. }
  1539. slot.n_past = common_part(slot.cache_tokens, prompt_tokens);
  1540. // the last token of the cache is not in the KV cache until the next call to llama_decode
  1541. // (it was sampled, pushed into the "cache_tokens", but not yet put in the context)
  1542. if (slot.n_past > 0 && slot.n_past == (int32_t) slot.cache_tokens.size())
  1543. {
  1544. slot.n_past -= 1;
  1545. }
  1546. slot.n_prompt_tokens_processed = slot.n_prompt_tokens - slot.n_past;
  1547. if (slot.ga_n != 1)
  1548. {
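// With group attention (Self-Extend) enabled, the reused KV cache stores compressed positions,
// so re-derive the compressed position (n_past_se) and group start (ga_i) by replaying the
// compression over the cached prefix: every full window of ga_w positions collapses to
// ga_w/ga_n positions, i.e. a shift of bd = (ga_w/ga_n)*(ga_n - 1) (e.g. ga_n = 4, ga_w = 512 -> bd = 384).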
  1549. int ga_i = 0;
  1550. int32_t ga_n = slot.ga_n;
  1551. int32_t ga_w = slot.ga_w;
  1552. int32_t slot_npast = 0;
  1553. for (int k = 0; k < slot.n_past; ++k)
  1554. {
  1555. while (slot_npast >= ga_i + ga_w) {
  1556. const int bd = (ga_w/ga_n)*(ga_n - 1);
  1557. slot_npast -= bd;
  1558. ga_i += ga_w/ga_n;
  1559. }
  1560. slot_npast++;
  1561. }
  1562. slot.n_past_se = slot_npast;
  1563. slot.ga_i = ga_i;
  1564. }
  1565. LOG_DEBUG("slot progression", {
  1566. { "slot_id", slot.id },
  1567. { "task_id", slot.task_id },
  1568. { "n_past", slot.n_past },
  1569. { "n_past_se", slot.n_past_se },
  1570. { "ga_i", slot.ga_i },
  1571. { "n_prompt_tokens_processed", slot.n_prompt_tokens_processed }
  1572. });
  1573. }
  1574. slot.cache_tokens = prompt_tokens;
  1575. if (slot.n_past == slot.n_prompt_tokens && slot.n_past > 0)
  1576. {
  1577. // we have to evaluate at least 1 token to generate logits.
  1578. LOG_DEBUG("we have to evaluate at least 1 token to generate logits", {
  1579. { "slot_id", slot.id },
  1580. { "task_id", slot.task_id }
  1581. });
  1582. slot.n_past--;
  1583. if (slot.ga_i > 0)
  1584. {
  1585. slot.n_past_se--;
  1586. }
  1587. }
  1588. int p0 = (int) system_tokens.size() + slot.n_past;
  1589. LOG_DEBUG("kv cache rm [p0, end)", {
  1590. { "slot_id", slot.id },
  1591. { "task_id", slot.task_id },
  1592. { "p0", p0 }
  1593. });
  1594. llama_kv_cache_seq_rm(ctx, slot.id, p0, -1);
  1595. LOG_VERBOSE("prompt ingested", {
  1596. {"n_past", slot.n_past},
  1597. {"cached", tokens_to_str(ctx, slot.cache_tokens.cbegin(), slot.cache_tokens.cbegin() + slot.n_past)},
  1598. {"to_eval", tokens_to_str(ctx, slot.cache_tokens.cbegin() + slot.n_past, slot.cache_tokens.cend())},
  1599. });
  1600. const bool has_images = process_images(slot);
  1601. // process the prefix of first image
  1602. std::vector<llama_token> prefix_tokens = has_images ? tokenize(slot.images[0].prefix_prompt, add_bos_token) : prompt_tokens;
  1603. int32_t slot_npast = slot.n_past_se > 0 ? slot.n_past_se : slot.n_past;
  1604. int32_t ga_i = slot.ga_i;
  1605. int32_t ga_n = slot.ga_n;
  1606. int32_t ga_w = slot.ga_w;
  1607. for (; slot.n_past < (int) prefix_tokens.size(); ++slot.n_past)
  1608. {
  1609. if (slot.ga_n != 1)
  1610. {
  1611. while (slot_npast >= ga_i + ga_w) {
  1612. const int bd = (ga_w/ga_n)*(ga_n - 1);
  1613. slot_npast -= bd;
  1614. ga_i += ga_w/ga_n;
  1615. }
  1616. }
  1617. llama_batch_add(batch, prefix_tokens[slot.n_past], system_tokens.size() + slot_npast, { slot.id }, false);
  1618. slot_npast++;
  1619. }
  1620. if (has_images && !ingest_images(slot, n_batch))
  1621. {
  1622. LOG_ERROR("failed processing images", {
  1623. {"slot_id", slot.id},
  1624. {"task_id", slot.task_id},
  1625. });
  1626. // FIXME @phymbert: to be properly tested
1627. // early returning without changing the slot state will block the slot forever
  1628. // no one at the moment is checking the return value
  1629. return false;
  1630. }
  1631. // extract the logits only for the last token
  1632. if (batch.n_tokens > 0)
  1633. {
  1634. batch.logits[batch.n_tokens - 1] = true;
  1635. }
  1636. slot.n_decoded = 0;
  1637. slot.i_batch = batch.n_tokens - 1;
  1638. }
  1639. }
  1640. }
  1641. if (batch.n_tokens == 0)
  1642. {
  1643. all_slots_are_idle = true;
  1644. return true;
  1645. }
  1646. for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch)
  1647. {
  1648. const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i);
  1649. for (auto & slot : slots)
  1650. {
  1651. if (slot.ga_n != 1)
  1652. {
  1653. // context extension via Self-Extend
  1654. while (slot.n_past_se >= slot.ga_i + slot.ga_w)
  1655. {
  1656. const int ib = (slot.ga_n * slot.ga_i) / slot.ga_w;
  1657. const int bd = (slot.ga_w / slot.ga_n) * (slot.ga_n - 1);
  1658. const int dd = (slot.ga_w / slot.ga_n) - ib * bd - slot.ga_w;
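// ib = number of already-compressed blocks before the current group, bd = shift of one full group,
// dd = correction applied to the tail after the group. Illustrative first trigger with
// ga_n = 4, ga_w = 512, ga_i = 0, n_past_se = 520: ib = 0, bd = 384, dd = -384, so positions
// [0, 512) are divided by 4 into [0, 128), the tail [512, 520) moves down to [128, 136),
// and afterwards n_past_se = 136, ga_i = 128.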
  1659. LOG_TEE("\n");
  1660. LOG_TEE("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i, slot.n_past_se, ib * bd, slot.ga_i + ib * bd, slot.n_past_se + ib * bd);
  1661. LOG_TEE("div: [%6d, %6d] / %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w, slot.ga_n, (slot.ga_i + ib * bd) / slot.ga_n, (slot.ga_i + ib * bd + slot.ga_w) / slot.ga_n);
  1662. LOG_TEE("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd + slot.ga_w, slot.n_past_se + ib * bd, dd, slot.ga_i + ib * bd + slot.ga_w + dd, slot.n_past_se + ib * bd + dd);
  1663. llama_kv_cache_seq_add(ctx, slot.id, slot.ga_i, slot.n_past_se, ib * bd);
  1664. llama_kv_cache_seq_div(ctx, slot.id, slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w,slot.ga_n);
  1665. llama_kv_cache_seq_add(ctx, slot.id, slot.ga_i + ib * bd + slot.ga_w,slot.n_past_se + ib * bd, dd);
  1666. slot.n_past_se -= bd;
  1667. slot.ga_i += slot.ga_w / slot.ga_n;
  1668. LOG_TEE("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", slot.n_past_se + bd, slot.n_past_se, slot.ga_i);
  1669. }
  1670. slot.n_past_se += n_tokens;
  1671. }
  1672. }
  1673. llama_batch batch_view =
  1674. {
  1675. n_tokens,
  1676. batch.token + i,
  1677. nullptr,
  1678. batch.pos + i,
  1679. batch.n_seq_id + i,
  1680. batch.seq_id + i,
  1681. batch.logits + i,
  1682. 0, 0, 0, // unused
  1683. };
  1684. const int ret = llama_decode(ctx, batch_view);
  1685. if (ret != 0)
  1686. {
  1687. if (n_batch == 1 || ret < 0)
  1688. {
  1689. // if you get here, it means the KV cache is full - try increasing it via the context size
  1690. LOG_TEE("%s : failed to decode the batch, n_batch = %d, ret = %d\n", __func__, n_batch, ret);
  1691. return false;
  1692. }
  1693. LOG_TEE("%s : failed to find free space in the KV cache, retrying with smaller n_batch = %d\n", __func__, n_batch / 2);
  1694. // retry with half the batch size to try to find a free slot in the KV cache
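// After halving, `i -= n_batch` rewinds by the new (smaller) step so that the loop's own
// `i += n_batch` lands back on the same offset and the same span is retried with a smaller view.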
  1695. n_batch /= 2;
  1696. i -= n_batch;
  1697. continue;
  1698. }
  1699. for (auto & slot : slots)
  1700. {
  1701. if (slot.i_batch < (int) i || slot.i_batch >= (int) (i + n_tokens))
  1702. {
  1703. continue;
  1704. }
  1705. // prompt evaluated for embedding
  1706. if (slot.embedding)
  1707. {
  1708. send_embedding(slot, batch_view);
  1709. slot.release();
  1710. slot.i_batch = -1;
  1711. continue;
  1712. }
  1713. completion_token_output result;
  1714. const llama_token id = llama_sampling_sample(slot.ctx_sampling, ctx, NULL, slot.i_batch - i);
  1715. llama_sampling_accept(slot.ctx_sampling, ctx, id, true);
  1716. slot.n_decoded += 1;
  1717. if (slot.n_decoded == 1)
  1718. {
  1719. slot.t_start_genereration = ggml_time_us();
  1720. slot.t_prompt_processing = (slot.t_start_genereration - slot.t_start_process_prompt) / 1e3;
  1721. metrics.on_prompt_eval(slot);
  1722. }
  1723. llama_token_data_array cur_p = { slot.ctx_sampling->cur.data(), slot.ctx_sampling->cur.size(), false };
  1724. result.tok = id;
  1725. const int32_t n_probs = slot.sparams.n_probs;
  1726. if (slot.sparams.temp <= 0 && n_probs > 0)
  1727. {
  1728. // for llama_sample_token_greedy we need to sort candidates
  1729. llama_sample_softmax(ctx, &cur_p);
  1730. }
  1731. for (size_t i = 0; i < std::min(cur_p.size, (size_t)n_probs); ++i)
  1732. {
  1733. result.probs.push_back({cur_p.data[i].id, cur_p.data[i].p});
  1734. }
  1735. if (!process_token(result, slot))
  1736. {
  1737. slot.release();
  1738. slot.print_timings();
  1739. send_final_response(slot);
  1740. metrics.on_prediction(slot);
  1741. }
  1742. slot.i_batch = -1;
  1743. }
  1744. }
  1745. LOG_VERBOSE("slots updated", {});
  1746. return true;
  1747. }
  1748. json model_meta() {
  1749. return json{
  1750. {"vocab_type", llama_vocab_type(model)},
  1751. {"n_vocab", llama_n_vocab(model)},
  1752. {"n_ctx_train", llama_n_ctx_train(model)},
  1753. {"n_embd", llama_n_embd(model)},
  1754. {"n_params", llama_model_n_params(model)},
  1755. {"size", llama_model_size(model)},
  1756. };
  1757. }
  1758. };
  1759. static void server_print_usage(const char *argv0, const gpt_params &params,
  1760. const server_params &sparams)
  1761. {
  1762. printf("usage: %s [options]\n", argv0);
  1763. printf("\n");
  1764. printf("options:\n");
  1765. printf(" -h, --help show this help message and exit\n");
  1766. printf(" -v, --verbose verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled");
  1767. printf(" -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
  1768. printf(" -tb N, --threads-batch N number of threads to use during batch and prompt processing (default: same as --threads)\n");
  1769. printf(" --threads-http N number of threads in the http server pool to process requests (default: max(hardware concurrency - 1, --parallel N + 2))\n");
  1770. printf(" -c N, --ctx-size N size of the prompt context (default: %d)\n", params.n_ctx);
  1771. printf(" --rope-scaling {none,linear,yarn}\n");
  1772. printf(" RoPE frequency scaling method, defaults to linear unless specified by the model\n");
  1773. printf(" --rope-freq-base N RoPE base frequency (default: loaded from model)\n");
  1774. printf(" --rope-freq-scale N RoPE frequency scaling factor, expands context by a factor of 1/N\n");
  1775. printf(" --yarn-ext-factor N YaRN: extrapolation mix factor (default: 1.0, 0.0 = full interpolation)\n");
  1776. printf(" --yarn-attn-factor N YaRN: scale sqrt(t) or attention magnitude (default: 1.0)\n");
  1777. printf(" --yarn-beta-slow N YaRN: high correction dim or alpha (default: %.1f)\n", params.yarn_beta_slow);
  1778. printf(" --yarn-beta-fast N YaRN: low correction dim or beta (default: %.1f)\n", params.yarn_beta_fast);
  1779. printf(" --pooling {none,mean,cls}\n");
  1780. printf(" pooling type for embeddings, use model default if unspecified\n");
  1781. printf(" -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch);
  1782. printf(" --memory-f32 use f32 instead of f16 for memory key+value (default: disabled)\n");
  1783. printf(" not recommended: doubles context memory required and no measurable increase in quality\n");
  1784. if (llama_supports_mlock())
  1785. {
  1786. printf(" --mlock force system to keep model in RAM rather than swapping or compressing\n");
  1787. }
  1788. if (llama_supports_mmap())
  1789. {
  1790. printf(" --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
  1791. }
  1792. printf(" --numa TYPE attempt optimizations that help on some NUMA systems\n");
  1793. printf(" - distribute: spread execution evenly over all nodes\n");
  1794. printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n");
  1795. printf(" - numactl: use the CPU map provided my numactl\n");
  1796. if (llama_supports_gpu_offload()) {
  1797. printf(" -ngl N, --n-gpu-layers N\n");
  1798. printf(" number of layers to store in VRAM\n");
  1799. printf(" -sm SPLIT_MODE, --split-mode SPLIT_MODE\n");
  1800. printf(" how to split the model across multiple GPUs, one of:\n");
  1801. printf(" - none: use one GPU only\n");
  1802. printf(" - layer (default): split layers and KV across GPUs\n");
  1803. printf(" - row: split rows across GPUs\n");
  1804. printf(" -ts SPLIT --tensor-split SPLIT\n");
  1805. printf(" fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1\n");
  1806. printf(" -mg i, --main-gpu i the GPU to use for the model (with split-mode = none),\n");
  1807. printf(" or for intermediate results and KV (with split-mode = row)\n");
  1808. }
  1809. printf(" -m FNAME, --model FNAME\n");
  1810. printf(" model path (default: %s)\n", params.model.c_str());
  1811. printf(" -a ALIAS, --alias ALIAS\n");
  1812. printf(" set an alias for the model, will be added as `model` field in completion response\n");
  1813. printf(" --lora FNAME apply LoRA adapter (implies --no-mmap)\n");
  1814. printf(" --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n");
  1815. printf(" --host ip address to listen (default (default: %s)\n", sparams.hostname.c_str());
  1816. printf(" --port PORT port to listen (default (default: %d)\n", sparams.port);
  1817. printf(" --path PUBLIC_PATH path from which to serve static files (default %s)\n", sparams.public_path.c_str());
  1818. printf(" --api-key API_KEY optional api key to enhance server security. If set, requests must include this key for access.\n");
  1819. printf(" --api-key-file FNAME path to file containing api keys delimited by new lines. If set, requests must include one of the keys for access.\n");
  1820. printf(" -to N, --timeout N server read/write timeout in seconds (default: %d)\n", sparams.read_timeout);
  1821. printf(" --embedding enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
  1822. printf(" -np N, --parallel N number of slots for process requests (default: %d)\n", params.n_parallel);
  1823. printf(" -cb, --cont-batching enable continuous batching (a.k.a dynamic batching) (default: disabled)\n");
  1824. printf(" -fa, --flash-attn enable Flash Attention (default: %s)\n", params.flash_attn ? "enabled" : "disabled");
  1825. printf(" -spf FNAME, --system-prompt-file FNAME\n");
  1826. printf(" set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n");
  1827. printf(" -ctk TYPE, --cache-type-k TYPE\n");
  1828. printf(" KV cache data type for K (default: f16)\n");
  1829. printf(" -ctv TYPE, --cache-type-v TYPE\n");
  1830. printf(" KV cache data type for V (default: f16)\n");
  1831. printf(" --mmproj MMPROJ_FILE path to a multimodal projector file for LLaVA.\n");
  1832. printf(" --log-format log output format: json or text (default: json)\n");
  1833. printf(" --log-disable disables logging to a file.\n");
  1834. printf(" --slots-endpoint-disable disables slots monitoring endpoint.\n");
  1835. printf(" --metrics enable prometheus compatible metrics endpoint (default: %s).\n", sparams.metrics_endpoint ? "enabled" : "disabled");
  1836. printf("\n");
  1837. printf(" -n, --n-predict maximum tokens to predict (default: %d)\n", params.n_predict);
  1838. printf(" --override-kv KEY=TYPE:VALUE\n");
  1839. printf(" advanced option to override model metadata by key. may be specified multiple times.\n");
  1840. printf(" types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
  1841. printf(" -gan N, --grp-attn-n N set the group attention factor to extend context size through self-extend(default: 1=disabled), used together with group attention width `--grp-attn-w`\n");
  1842. printf(" -gaw N, --grp-attn-w N set the group attention width to extend context size through self-extend(default: 512), used together with group attention factor `--grp-attn-n`\n");
  1843. printf(" --chat-template JINJA_TEMPLATE\n");
  1844. printf(" set custom jinja chat template (default: template taken from model's metadata)\n");
  1845. printf(" Note: only commonly used templates are accepted, since we don't have jinja parser\n");
  1846. printf("\n");
  1847. }
  1848. static void server_params_parse(int argc, char **argv, server_params &sparams,
  1849. gpt_params &params, llama_server_context& llama)
  1850. {
  1851. gpt_params default_params;
  1852. server_params default_sparams;
  1853. std::string arg;
  1854. bool invalid_param = false;
  1855. for (int i = 1; i < argc; i++)
  1856. {
  1857. arg = argv[i];
  1858. if (arg == "--port")
  1859. {
  1860. if (++i >= argc)
  1861. {
  1862. invalid_param = true;
  1863. break;
  1864. }
  1865. sparams.port = std::stoi(argv[i]);
  1866. }
  1867. else if (arg == "--host")
  1868. {
  1869. if (++i >= argc)
  1870. {
  1871. invalid_param = true;
  1872. break;
  1873. }
  1874. sparams.hostname = argv[i];
  1875. }
  1876. else if (arg == "--path")
  1877. {
  1878. if (++i >= argc)
  1879. {
  1880. invalid_param = true;
  1881. break;
  1882. }
  1883. sparams.public_path = argv[i];
  1884. }
  1885. else if (arg == "--api-key")
  1886. {
  1887. if (++i >= argc)
  1888. {
  1889. invalid_param = true;
  1890. break;
  1891. }
  1892. sparams.api_keys.emplace_back(argv[i]);
  1893. }
  1894. else if (arg == "--api-key-file")
  1895. {
  1896. if (++i >= argc)
  1897. {
  1898. invalid_param = true;
  1899. break;
  1900. }
  1901. std::ifstream key_file(argv[i]);
  1902. if (!key_file) {
  1903. fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
  1904. invalid_param = true;
  1905. break;
  1906. }
  1907. std::string key;
  1908. while (std::getline(key_file, key)) {
  1909. if (key.size() > 0) {
  1910. sparams.api_keys.push_back(key);
  1911. }
  1912. }
  1913. key_file.close();
  1914. }
  1915. else if (arg == "--timeout" || arg == "-to")
  1916. {
  1917. if (++i >= argc)
  1918. {
  1919. invalid_param = true;
  1920. break;
  1921. }
  1922. sparams.read_timeout = std::stoi(argv[i]);
  1923. sparams.write_timeout = std::stoi(argv[i]);
  1924. }
  1925. else if (arg == "-m" || arg == "--model")
  1926. {
  1927. if (++i >= argc)
  1928. {
  1929. invalid_param = true;
  1930. break;
  1931. }
  1932. params.model = argv[i];
  1933. }
  1934. else if (arg == "-a" || arg == "--alias")
  1935. {
  1936. if (++i >= argc)
  1937. {
  1938. invalid_param = true;
  1939. break;
  1940. }
  1941. params.model_alias = argv[i];
  1942. }
  1943. else if (arg == "-h" || arg == "--help")
  1944. {
  1945. server_print_usage(argv[0], default_params, default_sparams);
  1946. exit(0);
  1947. }
  1948. else if (arg == "-c" || arg == "--ctx-size" || arg == "--ctx_size")
  1949. {
  1950. if (++i >= argc)
  1951. {
  1952. invalid_param = true;
  1953. break;
  1954. }
  1955. params.n_ctx = std::stoi(argv[i]);
  1956. }
  1957. else if (arg == "--rope-scaling")
  1958. {
  1959. if (++i >= argc)
  1960. {
  1961. invalid_param = true;
  1962. break;
  1963. }
  1964. std::string value(argv[i]);
  1965. /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
  1966. else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
  1967. else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
  1968. else { invalid_param = true; break; }
  1969. }
  1970. else if (arg == "--rope-freq-base")
  1971. {
  1972. if (++i >= argc)
  1973. {
  1974. invalid_param = true;
  1975. break;
  1976. }
  1977. params.rope_freq_base = std::stof(argv[i]);
  1978. }
  1979. else if (arg == "--rope-freq-scale")
  1980. {
  1981. if (++i >= argc)
  1982. {
  1983. invalid_param = true;
  1984. break;
  1985. }
  1986. params.rope_freq_scale = std::stof(argv[i]);
  1987. }
  1988. else if (arg == "--yarn-ext-factor")
  1989. {
  1990. if (++i >= argc) {
  1991. invalid_param = true;
  1992. break;
  1993. }
  1994. params.yarn_ext_factor = std::stof(argv[i]);
  1995. }
  1996. else if (arg == "--yarn-attn-factor")
  1997. {
  1998. if (++i >= argc) {
  1999. invalid_param = true;
  2000. break;
  2001. }
  2002. params.yarn_attn_factor = std::stof(argv[i]);
  2003. }
  2004. else if (arg == "--yarn-beta-fast")
  2005. {
  2006. if (++i >= argc) {
  2007. invalid_param = true;
  2008. break;
  2009. }
  2010. params.yarn_beta_fast = std::stof(argv[i]);
  2011. }
  2012. else if (arg == "--yarn-beta-slow")
  2013. {
  2014. if (++i >= argc) {
  2015. invalid_param = true;
  2016. break;
  2017. }
  2018. params.yarn_beta_slow = std::stof(argv[i]);
  2019. }
  2020. else if (arg == "--pooling")
  2021. {
  2022. if (++i >= argc) {
  2023. invalid_param = true;
  2024. break;
  2025. }
  2026. std::string value(argv[i]);
  2027. /**/ if (value == "none") { params.pooling_type = LLAMA_POOLING_TYPE_NONE; }
  2028. else if (value == "mean") { params.pooling_type = LLAMA_POOLING_TYPE_MEAN; }
  2029. else if (value == "cls") { params.pooling_type = LLAMA_POOLING_TYPE_CLS; }
  2030. else { invalid_param = true; break; }
  2031. }
  2032. else if (arg == "--threads" || arg == "-t")
  2033. {
  2034. if (++i >= argc)
  2035. {
  2036. invalid_param = true;
  2037. break;
  2038. }
  2039. params.n_threads = std::stoi(argv[i]);
  2040. }
  2041. else if (arg == "--grp-attn-n" || arg == "-gan")
  2042. {
  2043. if (++i >= argc) {
  2044. invalid_param = true;
  2045. break;
  2046. }
  2047. params.grp_attn_n = std::stoi(argv[i]);
  2048. }
  2049. else if (arg == "--grp-attn-w" || arg == "-gaw")
  2050. {
  2051. if (++i >= argc)
  2052. {
  2053. invalid_param = true;
  2054. break;
  2055. }
  2056. params.grp_attn_w = std::stoi(argv[i]);
  2057. }
  2058. else if (arg == "--threads-batch" || arg == "-tb")
  2059. {
  2060. if (++i >= argc)
  2061. {
  2062. invalid_param = true;
  2063. break;
  2064. }
  2065. params.n_threads_batch = std::stoi(argv[i]);
  2066. }
  2067. else if (arg == "--threads-http")
  2068. {
  2069. if (++i >= argc)
  2070. {
  2071. invalid_param = true;
  2072. break;
  2073. }
  2074. sparams.n_threads_http = std::stoi(argv[i]);
  2075. }
  2076. else if (arg == "-b" || arg == "--batch-size")
  2077. {
  2078. if (++i >= argc)
  2079. {
  2080. invalid_param = true;
  2081. break;
  2082. }
  2083. params.n_batch = std::stoi(argv[i]);
  2084. }
  2085. else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers")
  2086. {
  2087. if (++i >= argc)
  2088. {
  2089. invalid_param = true;
  2090. break;
  2091. }
  2092. if (llama_supports_gpu_offload()) {
  2093. params.n_gpu_layers = std::stoi(argv[i]);
  2094. } else {
  2095. LOG_WARNING("Not compiled with GPU offload support, --n-gpu-layers option will be ignored. "
  2096. "See main README.md for information on enabling GPU BLAS support",
  2097. {{"n_gpu_layers", params.n_gpu_layers}});
  2098. }
  2099. }
  2100. else if (arg == "--split-mode" || arg == "-sm")
  2101. {
  2102. if (++i >= argc) {
  2103. invalid_param = true;
  2104. break;
  2105. }
  2106. std::string arg_next = argv[i];
  2107. if (arg_next == "none")
  2108. {
  2109. params.split_mode = LLAMA_SPLIT_MODE_NONE;
  2110. }
  2111. else if (arg_next == "layer")
  2112. {
  2113. params.split_mode = LLAMA_SPLIT_MODE_LAYER;
  2114. }
  2115. else if (arg_next == "row")
  2116. {
  2117. params.split_mode = LLAMA_SPLIT_MODE_ROW;
  2118. }
  2119. else {
  2120. invalid_param = true;
  2121. break;
  2122. }
  2123. #ifndef GGML_USE_CUBLAS
  2124. fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Setting the split mode has no effect.\n");
  2125. #endif // GGML_USE_CUBLAS
  2126. }
  2127. else if (arg == "--tensor-split" || arg == "-ts")
  2128. {
  2129. if (++i >= argc)
  2130. {
  2131. invalid_param = true;
  2132. break;
  2133. }
  2134. #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
  2135. std::string arg_next = argv[i];
  2136. // split string by , and /
  2137. const std::regex regex{R"([,/]+)"};
  2138. std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
  2139. std::vector<std::string> split_arg{it, {}};
  2140. GGML_ASSERT(split_arg.size() <= llama_max_devices());
  2141. for (size_t i_device = 0; i_device < llama_max_devices(); ++i_device)
  2142. {
  2143. if (i_device < split_arg.size())
  2144. {
  2145. params.tensor_split[i_device] = std::stof(split_arg[i_device]);
  2146. }
  2147. else
  2148. {
  2149. params.tensor_split[i_device] = 0.0f;
  2150. }
  2151. }
  2152. #else
  2153. LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n", {});
  2154. #endif // GGML_USE_CUBLAS
  2155. }
  2156. else if (arg == "--main-gpu" || arg == "-mg")
  2157. {
  2158. if (++i >= argc)
  2159. {
  2160. invalid_param = true;
  2161. break;
  2162. }
  2163. #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
  2164. params.main_gpu = std::stoi(argv[i]);
  2165. #else
  2166. LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.", {});
  2167. #endif
  2168. }
  2169. else if (arg == "--lora")
  2170. {
  2171. if (++i >= argc)
  2172. {
  2173. invalid_param = true;
  2174. break;
  2175. }
  2176. params.lora_adapter.emplace_back(argv[i], 1.0f);
  2177. params.use_mmap = false;
  2178. }
  2179. else if (arg == "--lora-scaled")
  2180. {
  2181. if (++i >= argc)
  2182. {
  2183. invalid_param = true;
  2184. break;
  2185. }
  2186. const char * lora_adapter = argv[i];
  2187. if (++i >= argc)
  2188. {
  2189. invalid_param = true;
  2190. break;
  2191. }
  2192. params.lora_adapter.emplace_back(lora_adapter, std::stof(argv[i]));
  2193. params.use_mmap = false;
  2194. }
  2195. else if (arg == "--lora-base")
  2196. {
  2197. if (++i >= argc)
  2198. {
  2199. invalid_param = true;
  2200. break;
  2201. }
  2202. params.lora_base = argv[i];
  2203. }
  2204. else if (arg == "-v" || arg == "--verbose")
  2205. {
  2206. server_verbose = true;
  2207. }
  2208. else if (arg == "--mlock")
  2209. {
  2210. params.use_mlock = true;
  2211. }
  2212. else if (arg == "--no-mmap")
  2213. {
  2214. params.use_mmap = false;
  2215. }
  2216. else if (arg == "--numa")
  2217. {
  2218. if (++i >= argc) {
  2219. invalid_param = true;
  2220. break;
  2221. } else {
  2222. std::string value(argv[i]);
  2223. /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
  2224. else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
  2225. else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
  2226. else { invalid_param = true; break; }
  2227. }
  2228. }
  2229. else if (arg == "--embedding")
  2230. {
  2231. params.embedding = true;
  2232. }
  2233. else if (arg == "-cb" || arg == "--cont-batching")
  2234. {
  2235. params.cont_batching = true;
  2236. }
  2237. else if (arg == "-fa" || arg == "--flash-attn")
  2238. {
  2239. params.flash_attn = true;
  2240. }
  2241. else if (arg == "-np" || arg == "--parallel")
  2242. {
  2243. if (++i >= argc)
  2244. {
  2245. invalid_param = true;
  2246. break;
  2247. }
  2248. params.n_parallel = std::stoi(argv[i]);
  2249. }
  2250. else if (arg == "-n" || arg == "--n-predict")
  2251. {
  2252. if (++i >= argc)
  2253. {
  2254. invalid_param = true;
  2255. break;
  2256. }
  2257. params.n_predict = std::stoi(argv[i]);
  2258. }
  2259. else if (arg == "-spf" || arg == "--system-prompt-file")
  2260. {
  2261. if (++i >= argc)
  2262. {
  2263. invalid_param = true;
  2264. break;
  2265. }
  2266. std::ifstream file(argv[i]);
  2267. if (!file) {
  2268. fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
  2269. invalid_param = true;
  2270. break;
  2271. }
2272. std::string system_content;
2273. std::copy(
2274. std::istreambuf_iterator<char>(file),
2275. std::istreambuf_iterator<char>(),
2276. std::back_inserter(system_content)
2277. );
2278. llama.system_prompt_process(json::parse(system_content));
  2279. }
  2280. else if (arg == "-ctk" || arg == "--cache-type-k") {
  2281. params.cache_type_k = argv[++i];
  2282. }
  2283. else if (arg == "-ctv" || arg == "--cache-type-v") {
  2284. params.cache_type_v = argv[++i];
  2285. }
  2286. else if(arg == "--mmproj")
  2287. {
  2288. if (++i >= argc)
  2289. {
  2290. invalid_param = true;
  2291. break;
  2292. }
  2293. params.mmproj = argv[i];
  2294. }
  2295. else if (arg == "--log-format")
  2296. {
  2297. if (++i >= argc)
  2298. {
  2299. invalid_param = true;
  2300. break;
  2301. }
  2302. if (std::strcmp(argv[i], "json") == 0)
  2303. {
  2304. server_log_json = true;
  2305. }
  2306. else if (std::strcmp(argv[i], "text") == 0)
  2307. {
  2308. server_log_json = false;
  2309. }
  2310. else
  2311. {
  2312. invalid_param = true;
  2313. break;
  2314. }
  2315. }
  2316. else if (arg == "--log-disable")
  2317. {
  2318. log_set_target(stdout);
  2319. LOG_DEBUG("logging to file is disabled.", {});
  2320. }
  2321. else if (arg == "--slots-endpoint-disable")
  2322. {
  2323. sparams.slots_endpoint = false;
  2324. }
  2325. else if (arg == "--metrics")
  2326. {
  2327. sparams.metrics_endpoint = true;
  2328. }
  2329. else if (arg == "--chat-template")
  2330. {
  2331. if (++i >= argc)
  2332. {
  2333. invalid_param = true;
  2334. break;
  2335. }
  2336. if (!verify_custom_template(argv[i])) {
  2337. fprintf(stderr, "error: the supplied chat template is not supported: %s\n", argv[i]);
  2338. fprintf(stderr, "note: llama.cpp does not use jinja parser, we only support commonly used templates\n");
  2339. invalid_param = true;
  2340. break;
  2341. }
  2342. sparams.chat_template = argv[i];
  2343. }
  2344. else if (arg == "--override-kv")
  2345. {
  2346. if (++i >= argc) {
  2347. invalid_param = true;
  2348. break;
  2349. }
  2350. char * sep = strchr(argv[i], '=');
  2351. if (sep == nullptr || sep - argv[i] >= 128) {
  2352. fprintf(stderr, "error: Malformed KV override: %s\n", argv[i]);
  2353. invalid_param = true;
  2354. break;
  2355. }
  2356. struct llama_model_kv_override kvo;
  2357. std::strncpy(kvo.key, argv[i], sep - argv[i]);
  2358. kvo.key[sep - argv[i]] = 0;
  2359. sep++;
  2360. if (strncmp(sep, "int:", 4) == 0) {
  2361. sep += 4;
  2362. kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
  2363. kvo.val_i64 = std::atol(sep);
  2364. } else if (strncmp(sep, "float:", 6) == 0) {
  2365. sep += 6;
  2366. kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
  2367. kvo.val_f64 = std::atof(sep);
  2368. } else if (strncmp(sep, "bool:", 5) == 0) {
  2369. sep += 5;
  2370. kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
  2371. if (std::strcmp(sep, "true") == 0) {
  2372. kvo.val_bool = true;
  2373. } else if (std::strcmp(sep, "false") == 0) {
  2374. kvo.val_bool = false;
  2375. } else {
  2376. fprintf(stderr, "error: Invalid boolean value for KV override: %s\n", argv[i]);
  2377. invalid_param = true;
  2378. break;
  2379. }
  2380. } else {
  2381. fprintf(stderr, "error: Invalid type for KV override: %s\n", argv[i]);
  2382. invalid_param = true;
  2383. break;
  2384. }
  2385. params.kv_overrides.push_back(kvo);
  2386. }
  2387. else
  2388. {
  2389. fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
  2390. server_print_usage(argv[0], default_params, default_sparams);
  2391. exit(1);
  2392. }
  2393. }
  2394. if (!params.kv_overrides.empty()) {
  2395. params.kv_overrides.emplace_back();
  2396. params.kv_overrides.back().key[0] = 0;
  2397. }
  2398. if (invalid_param)
  2399. {
  2400. fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
  2401. server_print_usage(argv[0], default_params, default_sparams);
  2402. exit(1);
  2403. }
  2404. }
  2405. /* llama.cpp completion api semantics */
  2406. static json format_partial_response(
  2407. llama_server_context &llama, server_slot *slot, const std::string &content, const std::vector<completion_token_output> &probs
  2408. ) {
  2409. json res = json
  2410. {
  2411. {"content", content },
  2412. {"stop", false},
  2413. {"slot_id", slot->id },
  2414. {"multimodal", llama.multimodal }
  2415. };
  2416. if (slot->sparams.n_probs > 0)
  2417. {
  2418. res["completion_probabilities"] = probs_vector_to_json(llama.ctx, probs);
  2419. }
  2420. return res;
  2421. }
  2422. static json format_tokenizer_response(const std::vector<llama_token> &tokens)
  2423. {
  2424. return json {
  2425. {"tokens", tokens}
  2426. };
  2427. }
  2428. static json format_detokenized_response(std::string content)
  2429. {
  2430. return json {
  2431. {"content", content}
  2432. };
  2433. }
  2434. static void log_server_request(const httplib::Request &req, const httplib::Response &res)
  2435. {
  2436. // skip GH copilot requests when using default port
  2437. if (req.path == "/health" || req.path == "/v1/health" || req.path == "/v1/completions")
  2438. {
  2439. return;
  2440. }
  2441. LOG_DEBUG("request", {
  2442. {"remote_addr", req.remote_addr},
  2443. {"remote_port", req.remote_port},
  2444. {"status", res.status},
  2445. {"method", req.method},
  2446. {"path", req.path},
  2447. {"params", req.params},
  2448. });
  2449. LOG_VERBOSE("request", {
  2450. {"request", req.body},
  2451. {"response", res.body},
  2452. });
  2453. }
  2454. static void append_to_generated_text_from_generated_token_probs(llama_server_context &llama, server_slot *slot)
  2455. {
  2456. auto & gtps = slot->generated_token_probs;
  2457. auto translator = token_translator{llama.ctx};
  2458. auto add_strlen = [=](size_t sum, const completion_token_output & cto) { return sum + translator(cto).size(); };
  2459. const size_t len = std::accumulate(gtps.begin(), gtps.end(), size_t(0), add_strlen);
  2460. if (slot->generated_text.capacity() < slot->generated_text.size() + len)
  2461. {
  2462. slot->generated_text.reserve(slot->generated_text.size() + len);
  2463. }
  2464. for (const completion_token_output & cto : gtps)
  2465. {
  2466. slot->generated_text += translator(cto);
  2467. }
  2468. }
  2469. std::function<void(int)> shutdown_handler;
  2470. std::atomic_flag is_terminating = ATOMIC_FLAG_INIT;
  2471. inline void signal_handler(int signal) {
  2472. if (is_terminating.test_and_set()) {
  2473. // in case it hangs, we can force terminate the server by hitting Ctrl+C twice
2474. // this is for a better developer experience; we can remove it once the server is stable enough
  2475. fprintf(stderr, "Received second interrupt, terminating immediately.\n");
  2476. exit(1);
  2477. }
  2478. shutdown_handler(signal);
  2479. }
  2480. static bool update_load_progress(float progress, void *data)
  2481. {
  2482. ((llama_server_context*)data)->modelProgress = progress;
  2483. return true;
  2484. }
  2485. #if defined(_WIN32)
  2486. char* wchar_to_char(const wchar_t* wstr) {
  2487. if (wstr == nullptr) return nullptr;
  2488. // Determine the number of bytes needed for the UTF-8 string
  2489. int bytes = WideCharToMultiByte(CP_UTF8, 0, wstr, -1, nullptr, 0, nullptr, nullptr);
  2490. char* str = new char[bytes];
  2491. // Convert the wide-character string to a UTF-8 string
  2492. WideCharToMultiByte(CP_UTF8, 0, wstr, -1, str, bytes, nullptr, nullptr);
  2493. return str;
  2494. }
  2495. int wmain(int argc, wchar_t **wargv) {
  2496. char** argv = new char*[argc];
  2497. for (int i = 0; i < argc; ++i) {
  2498. argv[i] = wchar_to_char(wargv[i]);
  2499. }
  2500. #else
  2501. int main(int argc, char **argv) {
  2502. #endif
  2503. #if SERVER_VERBOSE != 1
  2504. log_disable();
  2505. #endif
  2506. // own arguments required by this example
  2507. gpt_params params;
  2508. server_params sparams;
  2509. // struct that contains llama context and inference
  2510. llama_server_context llama;
  2511. server_params_parse(argc, argv, sparams, params, llama);
  2512. if (params.model_alias == "unknown")
  2513. {
  2514. params.model_alias = params.model;
  2515. }
  2516. llama_backend_init();
  2517. llama_numa_init(params.numa);
  2518. LOG_INFO("build info", {{"build", LLAMA_BUILD_NUMBER},
  2519. {"commit", LLAMA_COMMIT}});
  2520. LOG_INFO("system info", {
  2521. {"n_threads", params.n_threads},
  2522. {"n_threads_batch", params.n_threads_batch},
  2523. {"total_threads", std::thread::hardware_concurrency()},
  2524. {"system_info", llama_print_system_info()},
  2525. });
  2526. httplib::Server svr;
  2527. std::atomic<server_state> state{SERVER_STATE_LOADING_MODEL};
  2528. svr.set_default_headers({{"Server", "llama.cpp"}});
  2529. // CORS preflight
  2530. svr.Options(R"(.*)", [](const httplib::Request &req, httplib::Response &res) {
  2531. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2532. res.set_header("Access-Control-Allow-Credentials", "true");
  2533. res.set_header("Access-Control-Allow-Methods", "POST");
  2534. res.set_header("Access-Control-Allow-Headers", "*");
  2535. });
  2536. svr.Get("/health", [&](const httplib::Request& req, httplib::Response& res) {
  2537. server_state current_state = state.load();
  2538. switch(current_state) {
  2539. case SERVER_STATE_READY: {
  2540. // request slots data using task queue
  2541. task_server task;
  2542. task.id = llama.queue_tasks.get_new_id();
  2543. task.type = TASK_TYPE_METRICS;
  2544. task.target_id = -1;
  2545. llama.queue_results.add_waiting_task_id(task.id);
  2546. llama.queue_tasks.post(task);
  2547. // get the result
  2548. task_result result = llama.queue_results.recv(task.id);
  2549. llama.queue_results.remove_waiting_task_id(task.id);
  2550. int n_idle_slots = result.result_json["idle"];
  2551. int n_processing_slots = result.result_json["processing"];
  2552. json health = {
  2553. {"status", "ok"},
  2554. {"slots_idle", n_idle_slots},
  2555. {"slots_processing", n_processing_slots}};
  2556. res.status = 200; // HTTP OK
  2557. if (sparams.slots_endpoint && req.has_param("include_slots")) {
  2558. health["slots"] = result.result_json["slots"];
  2559. }
  2560. if (n_idle_slots == 0) {
  2561. health["status"] = "no slot available";
  2562. if (req.has_param("fail_on_no_slot")) {
  2563. res.status = 503; // HTTP Service Unavailable
  2564. }
  2565. }
  2566. res.set_content(health.dump(), "application/json");
  2567. break;
  2568. }
  2569. case SERVER_STATE_LOADING_MODEL:
  2570. char buf[128];
  2571. snprintf(&buf[0], 128, R"({"status": "loading model", "progress": %0.2f})", llama.modelProgress);
  2572. res.set_content(buf, "application/json");
  2573. res.status = 503; // HTTP Service Unavailable
  2574. break;
  2575. case SERVER_STATE_ERROR:
  2576. res.set_content(R"({"status": "error", "error": "Model failed to load"})", "application/json");
  2577. res.status = 500; // HTTP Internal Server Error
  2578. break;
  2579. }
  2580. });
  2581. if (sparams.slots_endpoint) {
  2582. svr.Get("/slots", [&](const httplib::Request&, httplib::Response& res) {
  2583. // request slots data using task queue
  2584. task_server task;
  2585. task.id = llama.queue_tasks.get_new_id();
  2586. task.type = TASK_TYPE_METRICS;
  2587. task.target_id = -1;
  2588. llama.queue_results.add_waiting_task_id(task.id);
  2589. llama.queue_tasks.post(task);
  2590. // get the result
  2591. task_result result = llama.queue_results.recv(task.id);
  2592. llama.queue_results.remove_waiting_task_id(task.id);
  2593. res.set_content(result.result_json["slots"].dump(), "application/json");
  2594. res.status = 200; // HTTP OK
  2595. });
  2596. }
  2597. if (sparams.metrics_endpoint) {
  2598. svr.Get("/metrics", [&](const httplib::Request&, httplib::Response& res) {
  2599. // request slots data using task queue
  2600. task_server task;
  2601. task.id = llama.queue_tasks.get_new_id();
  2602. task.type = TASK_TYPE_METRICS;
  2603. task.target_id = -1;
  2604. llama.queue_results.add_waiting_task_id(task.id);
  2605. llama.queue_tasks.post(task);
  2606. // get the result
  2607. task_result result = llama.queue_results.recv(task.id);
  2608. llama.queue_results.remove_waiting_task_id(task.id);
  2609. json data = result.result_json;
  2610. uint64_t n_prompt_tokens_processed = data["n_prompt_tokens_processed"];
  2611. uint64_t t_prompt_processing = data["t_prompt_processing"];
  2612. uint64_t n_tokens_predicted = data["n_tokens_predicted"];
  2613. uint64_t t_tokens_generation = data["t_tokens_generation"];
  2614. int32_t kv_cache_used_cells = data["kv_cache_used_cells"];
  2615. // metrics definition: https://prometheus.io/docs/practices/naming/#metric-names
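// t_prompt_processing and t_tokens_generation are accumulated in milliseconds, so the gauges
// below compute n_tokens * 1e3 / t_ms, e.g. 100 prompt tokens in 250 ms -> 400 tokens/s (illustrative).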
  2616. json all_metrics_def = json {
  2617. {"counter", {{
  2618. {"name", "prompt_tokens_total"},
  2619. {"help", "Number of prompt tokens processed."},
  2620. {"value", data["n_prompt_tokens_processed_total"]}
  2621. }, {
  2622. {"name", "tokens_predicted_total"},
  2623. {"help", "Number of generation tokens processed."},
  2624. {"value", data["n_tokens_predicted_total"]}
  2625. }}},
  2626. {"gauge", {{
  2627. {"name", "prompt_tokens_seconds"},
  2628. {"help", "Average prompt throughput in tokens/s."},
  2629. {"value", n_prompt_tokens_processed ? 1e3 / t_prompt_processing * n_prompt_tokens_processed : 0}
  2630. },{
  2631. {"name", "predicted_tokens_seconds"},
  2632. {"help", "Average generation throughput in tokens/s."},
  2633. {"value", n_tokens_predicted ? 1e3 / t_tokens_generation * n_tokens_predicted : 0}
  2634. },{
  2635. {"name", "kv_cache_usage_ratio"},
  2636. {"help", "KV-cache usage. 1 means 100 percent usage."},
  2637. {"value", 1. * kv_cache_used_cells / params.n_ctx}
  2638. },{
  2639. {"name", "kv_cache_tokens"},
  2640. {"help", "KV-cache tokens."},
  2641. {"value", data["kv_cache_tokens_count"]}
  2642. },{
  2643. {"name", "requests_processing"},
  2644. {"help", "Number of request processing."},
  2645. {"value", data["processing"]}
  2646. },{
  2647. {"name", "requests_deferred"},
  2648. {"help", "Number of request deferred."},
  2649. {"value", data["deferred"]}
  2650. }}}
  2651. };
  2652. std::stringstream prometheus;
  2653. for (const auto& el : all_metrics_def.items()) {
  2654. const auto& type = el.key();
  2655. const auto& metrics_def = el.value();
  2656. for (const auto& metric_def : metrics_def) {
  2657. std::string name = metric_def["name"];
  2658. std::string help = metric_def["help"];
  2659. auto value = json_value(metric_def, "value", 0);
  2660. prometheus << "# HELP llamacpp:" << name << " " << help << "\n"
  2661. << "# TYPE llamacpp:" << name << " " << type << "\n"
  2662. << "llamacpp:" << name << " " << value << "\n";
  2663. }
  2664. }
  2665. res.set_content(prometheus.str(), "text/plain; version=0.0.4");
  2666. res.status = 200; // HTTP OK
  2667. });
  2668. }
  2669. svr.set_logger(log_server_request);
  2670. svr.set_exception_handler([](const httplib::Request &, httplib::Response &res, std::exception_ptr ep)
  2671. {
  2672. const char fmt[] = "500 Internal Server Error\n%s";
  2673. char buf[BUFSIZ];
  2674. try
  2675. {
  2676. std::rethrow_exception(std::move(ep));
  2677. }
  2678. catch (std::exception &e)
  2679. {
  2680. snprintf(buf, sizeof(buf), fmt, e.what());
  2681. }
  2682. catch (...)
  2683. {
  2684. snprintf(buf, sizeof(buf), fmt, "Unknown Exception");
  2685. }
  2686. res.set_content(buf, "text/plain; charset=utf-8");
  2687. res.status = 500;
  2688. });
  2689. svr.set_error_handler([](const httplib::Request &, httplib::Response &res)
  2690. {
  2691. if (res.status == 401)
  2692. {
  2693. res.set_content("Unauthorized", "text/plain; charset=utf-8");
  2694. }
  2695. if (res.status == 400)
  2696. {
  2697. res.set_content("Invalid request", "text/plain; charset=utf-8");
  2698. }
  2699. else if (res.status == 404)
  2700. {
  2701. res.set_content("File Not Found", "text/plain; charset=utf-8");
  2702. res.status = 404;
  2703. }
  2704. });
  2705. // set timeouts and change hostname and port
  2706. svr.set_read_timeout (sparams.read_timeout);
  2707. svr.set_write_timeout(sparams.write_timeout);
  2708. if (!svr.bind_to_port(sparams.hostname, sparams.port))
  2709. {
  2710. fprintf(stderr, "\ncouldn't bind to server socket: hostname=%s port=%d\n\n", sparams.hostname.c_str(), sparams.port);
  2711. return 1;
  2712. }
  2713. // Set the base directory for serving static files
  2714. svr.set_base_dir(sparams.public_path);
  2715. std::unordered_map<std::string, std::string> log_data;
  2716. log_data["hostname"] = sparams.hostname;
  2717. log_data["port"] = std::to_string(sparams.port);
  2718. if (sparams.api_keys.size() == 1) {
  2719. log_data["api_key"] = "api_key: ****" + sparams.api_keys[0].substr(sparams.api_keys[0].length() - 4);
  2720. } else if (sparams.api_keys.size() > 1) {
  2721. log_data["api_key"] = "api_key: " + std::to_string(sparams.api_keys.size()) + " keys loaded";
  2722. }
  2723. if (sparams.n_threads_http < 1) {
  2724. // +2 threads for monitoring endpoints
  2725. sparams.n_threads_http = std::max(params.n_parallel + 2, (int32_t) std::thread::hardware_concurrency() - 1);
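// e.g. (illustrative): --parallel 4 on an 8-thread machine gives max(4 + 2, 8 - 1) = 7 HTTP threads.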
  2726. }
  2727. log_data["n_threads_http"] = std::to_string(sparams.n_threads_http);
  2728. svr.new_task_queue = [&sparams] { return new httplib::ThreadPool(sparams.n_threads_http); };
  2729. LOG_INFO("HTTP server listening", log_data);
    // run the HTTP server in a thread - see comment below
    std::thread t([&]()
    {
        if (!svr.listen_after_bind())
        {
            state.store(SERVER_STATE_ERROR);
            return 1;
        }

        return 0;
    });

    // load the model
    params.progress_callback           = update_load_progress;
    params.progress_callback_user_data = (void *) &llama;

    if (!llama.load_model(params))
    {
        state.store(SERVER_STATE_ERROR);
        return 1;
    } else {
        llama.initialize();
        state.store(SERVER_STATE_READY);
        LOG_INFO("model loaded", {});
    }
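
    // read back metadata from the loaded model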
    const auto model_meta = llama.model_meta();

    if (sparams.chat_template.empty()) { // custom chat template is not supplied
        // check if the template that comes with the model is supported by us
        llama.validate_model_chat_template(sparams);
    }
    // Middleware for API key validation
    auto validate_api_key = [&sparams](const httplib::Request & req, httplib::Response & res) -> bool {
        // If API key is not set, skip validation
        if (sparams.api_keys.empty()) {
            return true;
        }

        // Check for API key in the header
        auto auth_header = req.get_header_value("Authorization");

        std::string prefix = "Bearer ";
        if (auth_header.substr(0, prefix.size()) == prefix) {
            std::string received_api_key = auth_header.substr(prefix.size());
            if (std::find(sparams.api_keys.begin(), sparams.api_keys.end(), received_api_key) != sparams.api_keys.end()) {
                return true; // API key is valid
            }
        }

        // API key is invalid or not provided
        res.set_content("Unauthorized: Invalid API Key", "text/plain; charset=utf-8");
        res.status = 401; // Unauthorized

        LOG_WARNING("Unauthorized: Invalid API Key", {});

        return false;
    };
    // this is only called if no index.html is found in the public --path
    svr.Get("/", [](const httplib::Request &, httplib::Response & res)
    {
        res.set_content("server running", "text/plain; charset=utf-8");
        res.status = 200; // HTTP OK
        return true;
    });
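
    // POST /completion: queue a completion task; either return the full result as JSON,
    // or, when "stream" is true, stream partial results as server-sent events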
  2785. svr.Post("/completion", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res)
  2786. {
  2787. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2788. if (!validate_api_key(req, res)) {
  2789. return;
  2790. }
  2791. json data = json::parse(req.body);
  2792. const int task_id = llama.queue_tasks.get_new_id();
  2793. llama.queue_results.add_waiting_task_id(task_id);
  2794. llama.request_completion(task_id, data, false, false, -1);
  2795. if (!json_value(data, "stream", false)) {
  2796. std::string completion_text;
  2797. task_result result = llama.queue_results.recv(task_id);
  2798. if (!result.error && result.stop) {
  2799. res.set_content(result.result_json.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
  2800. }
  2801. else
  2802. {
  2803. res.status = 404;
  2804. res.set_content(result.result_json["content"], "text/plain; charset=utf-8");
  2805. }
  2806. llama.queue_results.remove_waiting_task_id(task_id);
  2807. } else {
  2808. const auto chunked_content_provider = [task_id, &llama](size_t, httplib::DataSink & sink)
  2809. {
  2810. while (true)
  2811. {
  2812. task_result result = llama.queue_results.recv(task_id);
  2813. if (!result.error) {
  2814. const std::string str =
  2815. "data: " +
  2816. result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
  2817. "\n\n";
  2818. LOG_VERBOSE("data stream", {
  2819. { "to_send", str }
  2820. });
  2821. if (!sink.write(str.c_str(), str.size()))
  2822. {
  2823. llama.queue_results.remove_waiting_task_id(task_id);
  2824. return false;
  2825. }
  2826. if (result.stop) {
  2827. break;
  2828. }
  2829. } else {
  2830. const std::string str =
  2831. "error: " +
  2832. result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
  2833. "\n\n";
  2834. LOG_VERBOSE("data stream", {
  2835. { "to_send", str }
  2836. });
  2837. if (!sink.write(str.c_str(), str.size()))
  2838. {
  2839. llama.queue_results.remove_waiting_task_id(task_id);
  2840. return false;
  2841. }
  2842. break;
  2843. }
  2844. }
  2845. llama.queue_results.remove_waiting_task_id(task_id);
  2846. sink.done();
  2847. return true;
  2848. };
  2849. auto on_complete = [task_id, &llama] (bool)
  2850. {
  2851. // cancel
  2852. llama.request_cancel(task_id);
  2853. llama.queue_results.remove_waiting_task_id(task_id);
  2854. };
  2855. res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
  2856. }
  2857. });
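
    // POST /tokenize: convert "content" into model tokens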
  2858. svr.Post("/tokenize", [&llama](const httplib::Request &req, httplib::Response &res)
  2859. {
  2860. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2861. const json body = json::parse(req.body);
  2862. std::vector<llama_token> tokens;
  2863. if (body.count("content") != 0)
  2864. {
  2865. tokens = llama.tokenize(body["content"], false);
  2866. }
  2867. const json data = format_tokenizer_response(tokens);
  2868. return res.set_content(data.dump(), "application/json; charset=utf-8");
  2869. });
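
    // POST /detokenize: convert a "tokens" array back into text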
  2870. svr.Post("/detokenize", [&llama](const httplib::Request &req, httplib::Response &res)
  2871. {
  2872. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2873. const json body = json::parse(req.body);
  2874. std::string content;
  2875. if (body.count("tokens") != 0)
  2876. {
  2877. const std::vector<llama_token> tokens = body["tokens"];
  2878. content = tokens_to_str(llama.ctx, tokens.cbegin(), tokens.cend());
  2879. }
  2880. const json data = format_detokenized_response(content);
  2881. return res.set_content(data.dump(), "application/json; charset=utf-8");
  2882. });
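
    // POST /embedding: run the prompt with n_predict = 0 and return the embedding result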
  2883. svr.Post("/embedding", [&llama](const httplib::Request &req, httplib::Response &res)
  2884. {
  2885. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2886. const json body = json::parse(req.body);
  2887. json prompt;
  2888. if (body.count("content") != 0)
  2889. {
  2890. prompt = body["content"];
  2891. }
  2892. else
  2893. {
  2894. prompt = "";
  2895. }
  2896. json image_data;
  2897. if (body.count("image_data") != 0) {
  2898. image_data = body["image_data"];
  2899. }
  2900. else
  2901. {
  2902. image_data = "";
  2903. }
  2904. // create and queue the task
  2905. const int task_id = llama.queue_tasks.get_new_id();
  2906. llama.queue_results.add_waiting_task_id(task_id);
  2907. llama.request_completion(task_id, { {"prompt", prompt}, { "n_predict", 0}, {"image_data", image_data} }, false, true, -1);
  2908. // get the result
  2909. task_result result = llama.queue_results.recv(task_id);
  2910. llama.queue_results.remove_waiting_task_id(task_id);
  2911. // send the result
  2912. return res.set_content(result.result_json.dump(), "application/json; charset=utf-8");
  2913. });
    // GG: if I put the main loop inside a thread, it crashes on the first request when built in Debug!?
    //     "Bus error: 10" - this is on macOS, it does not crash on Linux
    //std::thread t2([&]()
    /*{
        bool running = true;
        while (running)
        {
            running = llama.update_slots();
        }
    }*/
    //);
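
    // wire the task queue callbacks into the server context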
    llama.queue_tasks.on_new_task(std::bind(
        &llama_server_context::process_single_task, &llama, std::placeholders::_1));
    llama.queue_tasks.on_finish_multitask(std::bind(
        &llama_server_context::on_finish_multitask, &llama, std::placeholders::_1));
    llama.queue_tasks.on_run_slots(std::bind(
        &llama_server_context::update_slots, &llama));
    llama.queue_results.on_multitask_update(std::bind(
        &llama_server_queue::update_multitask,
        &llama.queue_tasks,
        std::placeholders::_1,
        std::placeholders::_2,
        std::placeholders::_3
    ));

    shutdown_handler = [&](int) {
        llama.queue_tasks.terminate();
    };
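
    // install a SIGINT / Ctrl-C handler so the task loop can be asked to terminate cleanly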
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
    struct sigaction sigint_action;
    sigint_action.sa_handler = signal_handler;
    sigemptyset (&sigint_action.sa_mask);
    sigint_action.sa_flags = 0;
    sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
    auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
        return (ctrl_type == CTRL_C_EVENT) ? (signal_handler(SIGINT), true) : false;
    };
    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);

    for (int i = 0; i < argc; ++i) {
        delete[] argv[i];
    }
    delete[] argv;
#endif

    llama.queue_tasks.start_loop();

    svr.stop();
    t.join();

    llama_backend_free();

    return 0;
}