// MIT License
// Copyright (c) 2023 Georgi Gerganov
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

#include "common.h"
#include "llama.h"
#include "grammar-parser.h"
#include "utils.hpp"

#include "../llava/clip.h"
#include "../llava/llava.h"

#include "stb_image.h"

#ifndef NDEBUG
// crash the server in debug mode, otherwise send an http 500 error
#define CPPHTTPLIB_NO_EXCEPTIONS 1
#endif
// increase max payload length to allow use of larger context size
#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
#include "httplib.h"
#include "json.hpp"

#if defined(_WIN32)
#include <windows.h>
#include <errhandlingapi.h>
#endif

#include <cstddef>
#include <thread>
#include <chrono>
#include <condition_variable>
#include <atomic>
#include <signal.h>

using json = nlohmann::json;
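
// Runtime-configurable HTTP server settings. The defaults bind to
// 127.0.0.1:8080 with 600-second read/write timeouts; n_threads_http == -1
// defers the HTTP thread-pool size to a value resolved at startup
// (outside this excerpt).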
struct server_params {
    std::string hostname = "127.0.0.1";
    std::vector<std::string> api_keys;
    std::string public_path = "examples/server/public";
    int32_t port = 8080;
    int32_t read_timeout = 600;
    int32_t write_timeout = 600;
    bool slots_endpoint = true;
    bool metrics_endpoint = false;
    int n_threads_http = -1;
};

bool server_verbose = false;
bool server_log_json = false;

enum stop_type {
    STOP_FULL,
    STOP_PARTIAL,
};

// TODO: can become bool if we can't find use of more states
enum slot_state {
    IDLE,
    PROCESSING,
};

enum slot_command {
    NONE,
    LOAD_PROMPT,
    RELEASE,
};
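
// A slot's lifecycle is driven by the (state, command) pair:
//   (IDLE, NONE)        -> free, available for a new task
//   (IDLE, LOAD_PROMPT) -> a task has been assigned; the prompt will be
//                          tokenized and decoded on the next update
//   (PROCESSING, NONE)  -> actively generating tokens
//   (*, RELEASE)        -> generation finished; the slot is returned to IDLE
//                          by the update loop (outside this excerpt)
// See server_slot::available() and server_slot::is_processing() below.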
struct slot_params {
    bool stream = true;
    bool cache_prompt = false; // remember the prompt to avoid reprocessing it in full

    uint32_t seed = -1; // RNG seed (-1 for a random seed)
    int32_t n_keep = 0; // number of tokens to keep from the initial prompt
    int32_t n_predict = -1; // number of new tokens to predict

    std::vector<std::string> antiprompt;

    json input_prefix;
    json input_suffix;
};

struct slot_image {
    int32_t id;

    bool request_encode_image = false;
    float * image_embedding = nullptr;
    int32_t image_tokens = 0;

    clip_image_u8 * img_data;

    std::string prefix_prompt; // prompt text that comes before this image
};
struct server_slot {
    int id;
    int task_id = -1;

    struct slot_params params;

    slot_state state = IDLE;
    slot_command command = NONE;

    // used to determine the slot that has been used the longest
    int64_t t_last_used = -1;

    // generation props
    int32_t n_ctx = 0; // context size per slot
    int32_t n_past = 0;
    int32_t n_decoded = 0;
    int32_t n_remaining = -1;
    int32_t i_batch = -1;
    int32_t n_predict = -1;

    int32_t n_prompt_tokens = 0;
    int32_t n_prompt_tokens_processed = 0;

    json prompt;
    std::string generated_text;
    llama_token sampled;
    std::vector<llama_token> cache_tokens;
    std::vector<completion_token_output> generated_token_probs;

    bool embedding = false;
    bool has_next_token = true;
    bool truncated = false;
    bool stopped_eos = false;
    bool stopped_word = false;
    bool stopped_limit = false;

    std::string stopping_word;

    // sampling
    struct llama_sampling_params sparams;
    llama_sampling_context * ctx_sampling = nullptr;

    int32_t ga_i = 0;   // group-attention state
    int32_t ga_n = 1;   // group-attention factor
    int32_t ga_w = 512; // group-attention width

    int32_t n_past_se = 0; // self-extend

    // multimodal
    std::vector<slot_image> images;

    // stats
    size_t n_sent_text = 0; // number of text characters sent so far
    size_t n_sent_token_probs = 0;

    int64_t t_start_process_prompt;
    int64_t t_start_genereration;

    double t_prompt_processing; // ms
    double t_token_generation;  // ms

    // multitasks
    int multitask_id = -1;
    void reset() {
        n_prompt_tokens = 0;
        generated_text = "";
        truncated = false;
        stopped_eos = false;
        stopped_word = false;
        stopped_limit = false;
        stopping_word = "";
        n_past = 0;
        n_sent_text = 0;
        n_sent_token_probs = 0;
        ga_i = 0;
        n_past_se = 0;

        generated_token_probs.clear();

        for (slot_image & img : images) {
            free(img.image_embedding);
            if (img.img_data) {
                clip_image_u8_free(img.img_data);
            }
            img.prefix_prompt = "";
        }

        images.clear();
    }
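
    // Token budget check. For example, with params.n_predict = 128 and
    // n_decoded = 100, n_remaining becomes 28 and generation continues;
    // once n_decoded reaches 128 this returns false and the slot stops.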
    bool has_budget(gpt_params & global_params) {
        if (params.n_predict == -1 && global_params.n_predict == -1) {
            return true; // limitless
        }

        n_remaining = -1;

        if (params.n_predict != -1) {
            n_remaining = params.n_predict - n_decoded;
        } else if (global_params.n_predict != -1) {
            n_remaining = global_params.n_predict - n_decoded;
        }

        return n_remaining > 0; // true while there is still budget left
    }
    bool available() const {
        return state == IDLE && command == NONE;
    }

    bool is_processing() const {
        return (state == IDLE && command == LOAD_PROMPT) || state == PROCESSING;
    }

    void add_token_string(const completion_token_output & token) {
        if (command == RELEASE) {
            return;
        }
        cache_tokens.push_back(token.tok);
        generated_token_probs.push_back(token);
    }

    void release() {
        if (state == PROCESSING)
        {
            t_token_generation = (ggml_time_us() - t_start_genereration) / 1e3;
            command = RELEASE;
        }
    }
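
    // Timings are reported in milliseconds. The derived fields follow
    // directly from the counters, e.g.:
    //   prompt_per_token_ms = t_prompt_processing / n_prompt_tokens_processed
    //   prompt_per_second   = 1e3 * n_prompt_tokens_processed / t_prompt_processing
    // Note: both divisors can be zero (empty prompt or no generated tokens),
    // which yields inf/nan in the reported JSON.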
    json get_formated_timings() {
        return json
        {
            {"prompt_n", n_prompt_tokens_processed},
            {"prompt_ms", t_prompt_processing},
            {"prompt_per_token_ms", t_prompt_processing / n_prompt_tokens_processed},
            {"prompt_per_second", 1e3 / t_prompt_processing * n_prompt_tokens_processed},
            {"predicted_n", n_decoded},
            {"predicted_ms", t_token_generation},
            {"predicted_per_token_ms", t_token_generation / n_decoded},
            {"predicted_per_second", 1e3 / t_token_generation * n_decoded},
        };
    }
    void print_timings() const {
        char buffer[512];

        double t_token = t_prompt_processing / n_prompt_tokens_processed;
        double n_tokens_second = 1e3 / t_prompt_processing * n_prompt_tokens_processed;
        snprintf(buffer, sizeof(buffer), "prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)",
            t_prompt_processing, n_prompt_tokens_processed,
            t_token, n_tokens_second);
        LOG_DEBUG(buffer, {
            {"slot_id", id},
            {"task_id", task_id},
            {"t_prompt_processing", t_prompt_processing},
            {"n_prompt_tokens_processed", n_prompt_tokens_processed},
            {"t_token", t_token},
            {"n_tokens_second", n_tokens_second},
        });

        t_token = t_token_generation / n_decoded;
        n_tokens_second = 1e3 / t_token_generation * n_decoded;
        snprintf(buffer, sizeof(buffer), "generation eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)",
            t_token_generation, n_decoded,
            t_token, n_tokens_second);
        LOG_DEBUG(buffer, {
            {"slot_id", id},
            {"task_id", task_id},
            {"t_token_generation", t_token_generation},
            {"n_decoded", n_decoded},
            {"t_token", t_token},
            {"n_tokens_second", n_tokens_second},
        });

        snprintf(buffer, sizeof(buffer), " total time = %10.2f ms", t_prompt_processing + t_token_generation);
        LOG_DEBUG(buffer, {
            {"slot_id", id},
            {"task_id", task_id},
            {"t_prompt_processing", t_prompt_processing},
            {"t_token_generation", t_token_generation},
            {"t_total", t_prompt_processing + t_token_generation},
        });
    }
};
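
// Aggregate counters for the /metrics endpoint. The *_total fields grow
// monotonically for the lifetime of the server, while the bucket fields
// (n_prompt_tokens_processed, t_prompt_processing, n_tokens_predicted,
// t_tokens_generation) are cleared by reset_bucket() after every
// TASK_TYPE_METRICS report (see process_single_task below).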
struct server_metrics {
    uint64_t n_prompt_tokens_processed_total = 0;
    uint64_t n_tokens_predicted_total = 0;

    uint64_t n_prompt_tokens_processed = 0;
    uint64_t t_prompt_processing = 0;

    uint64_t n_tokens_predicted = 0;
    uint64_t t_tokens_generation = 0;

    void on_prompt_eval(const server_slot & slot) {
        n_prompt_tokens_processed_total += slot.n_prompt_tokens_processed;
        n_prompt_tokens_processed += slot.n_prompt_tokens_processed;
        t_prompt_processing += slot.t_prompt_processing;
    }

    void on_prediction(const server_slot & slot) {
        n_tokens_predicted_total += slot.n_decoded;
        n_tokens_predicted += slot.n_decoded;
        t_tokens_generation += slot.t_token_generation;
    }

    void reset_bucket() {
        n_prompt_tokens_processed = 0;
        t_prompt_processing = 0;
        n_tokens_predicted = 0;
        t_tokens_generation = 0;
    }
};
struct llama_server_context
{
    llama_model * model = nullptr;
    float modelProgress = 0.0;
    llama_context * ctx = nullptr;
    clip_ctx * clp_ctx = nullptr;

    gpt_params params;

    llama_batch batch;

    bool multimodal = false;
    bool clean_kv_cache = true;
    bool all_slots_are_idle = false;
    bool add_bos_token = true;

    int32_t n_ctx; // total context for all clients / slots

    // system prompt
    bool system_need_update = false;

    std::string system_prompt;
    std::vector<llama_token> system_tokens;

    std::string name_user; // this should be the antiprompt
    std::string name_assistant;

    // slots / clients
    std::vector<server_slot> slots;

    llama_server_queue queue_tasks;
    llama_server_response queue_results;

    server_metrics metrics;

    ~llama_server_context()
    {
        if (clp_ctx)
        {
            LOG_DEBUG("freeing clip model", {});
            clip_free(clp_ctx);
            clp_ctx = nullptr;
        }
        if (ctx)
        {
            llama_free(ctx);
            ctx = nullptr;
        }
        if (model)
        {
            llama_free_model(model);
            model = nullptr;
        }
    }

    bool load_model(const gpt_params & params_)
    {
        params = params_;
        if (!params.mmproj.empty()) {
            multimodal = true;
            LOG_DEBUG("Multi Modal Mode Enabled", {});
            clp_ctx = clip_model_load(params.mmproj.c_str(), /*verbosity=*/ 1);
            if (clp_ctx == nullptr) {
                LOG_ERROR("unable to load clip model", {{"model", params.mmproj}});
                return false;
            }

            if (params.n_ctx < 2048) { // request larger context for the image embedding
                params.n_ctx = 2048;
            }
        }

        std::tie(model, ctx) = llama_init_from_gpt_params(params);
        if (model == nullptr)
        {
            LOG_ERROR("unable to load model", {{"model", params.model}});
            return false;
        }

        if (multimodal) {
            const int n_embd_clip = clip_n_mmproj_embd(clp_ctx);
            const int n_embd_llm = llama_n_embd(model);
            if (n_embd_clip != n_embd_llm) {
                LOG_TEE("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_embd_clip, n_embd_llm);
                llama_free(ctx);
                llama_free_model(model);
                return false;
            }
        }

        n_ctx = llama_n_ctx(ctx);

        add_bos_token = llama_should_add_bos_token(model);

        return true;
    }
    void initialize() {
        // create slots
        all_slots_are_idle = true;

        const int32_t n_ctx_slot = n_ctx / params.n_parallel;

        LOG_DEBUG("initializing slots", {{"n_slots", params.n_parallel}});
        for (int i = 0; i < params.n_parallel; i++)
        {
            server_slot slot;

            slot.id = i;
            slot.n_ctx = n_ctx_slot;
            slot.n_predict = params.n_predict;

            LOG_DEBUG("new slot", {
                {"slot_id", slot.id},
                {"n_ctx_slot", slot.n_ctx}
            });

            const int ga_n = params.grp_attn_n;
            const int ga_w = params.grp_attn_w;

            if (ga_n != 1) {
                GGML_ASSERT(ga_n > 0 && "ga_n must be positive"); // NOLINT
                GGML_ASSERT(ga_w % ga_n == 0 && "ga_w must be a multiple of ga_n"); // NOLINT
                //GGML_ASSERT(n_ctx_train % ga_w == 0 && "n_ctx_train must be a multiple of ga_w"); // NOLINT
                //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * ga_n"); // NOLINT

                LOG_DEBUG("slot self-extend", {
                    {"slot_id", slot.id},
                    {"ga_n", ga_n},
                    {"ga_w", ga_w}
                });
            }

            slot.ga_i = 0;
            slot.ga_n = ga_n;
            slot.ga_w = ga_w;

            slot.reset();

            slots.push_back(slot);
        }

        batch = llama_batch_init(n_ctx, 0, params.n_parallel);
    }
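
    // A prompt may be a plain string or a mixed array of strings and token
    // ids, e.g. (illustrative values):
    //   "prompt": "Hello world"
    //   "prompt": ["Hello", 32000, " world"]
    // Strings are tokenized; integers are taken as token ids verbatim.
    // BOS is added at most once, at the very beginning.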
    std::vector<llama_token> tokenize(const json & json_prompt, bool add_bos) const
    {
        // TODO: currently, we tokenize using special tokens by default
        //       this is not always correct (see https://github.com/ggerganov/llama.cpp/pull/4160#issuecomment-1824826216)
        //       but it's better compared to completely ignoring ChatML and other chat templates
        const bool TMP_FORCE_SPECIAL = true;

        // If `add_bos` is true, we only add BOS, when json_prompt is a string,
        // or the first element of the json_prompt array is a string.
        std::vector<llama_token> prompt_tokens;

        if (json_prompt.is_array())
        {
            bool first = true;
            for (const auto & p : json_prompt)
            {
                if (p.is_string())
                {
                    auto s = p.template get<std::string>();
                    std::vector<llama_token> ids;
                    if (first)
                    {
                        ids = ::llama_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL);
                        first = false;
                    }
                    else
                    {
                        ids = ::llama_tokenize(ctx, s, false, TMP_FORCE_SPECIAL);
                    }
                    prompt_tokens.insert(prompt_tokens.end(), ids.begin(), ids.end());
                }
                else
                {
                    if (first)
                    {
                        first = false;
                    }
                    prompt_tokens.push_back(p.template get<llama_token>());
                }
            }
        }
        else
        {
            auto s = json_prompt.template get<std::string>();
            prompt_tokens = ::llama_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL);
        }

        return prompt_tokens;
    }
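
    // Pick a slot for a request: the slot with a matching id wins if it is
    // free; otherwise fall back to the least-recently-used available slot
    // (id == -1 requests any slot). Returns nullptr when every slot is busy.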
    server_slot * get_slot(int id) {
        int64_t t_last = ggml_time_us();
        server_slot * last_used = nullptr;

        for (server_slot & slot : slots)
        {
            if (slot.id == id && slot.available())
            {
                return &slot;
            }

            if (slot.available() && slot.t_last_used < t_last)
            {
                last_used = &slot;
                t_last = slot.t_last_used;
            }
        }

        return last_used;
    }
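
    // Configure a slot from the JSON body of a completion request and mark
    // it ready to load the prompt. An illustrative (not exhaustive) payload:
    //   {
    //     "prompt": "Once upon a time",
    //     "n_predict": 128,
    //     "temperature": 0.8,
    //     "stop": ["\n\n"],
    //     "stream": true,
    //     "cache_prompt": true
    //   }
    // Missing fields fall back to the compiled-in defaults captured in
    // default_params / default_sparams.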
    bool launch_slot_with_data(server_slot * & slot, json data) {
        slot_params default_params;
        llama_sampling_params default_sparams;

        slot->params.stream = json_value(data, "stream", false);
        slot->params.cache_prompt = json_value(data, "cache_prompt", false);
        slot->params.n_predict = json_value(data, "n_predict", default_params.n_predict);
        slot->sparams.top_k = json_value(data, "top_k", default_sparams.top_k);
        slot->sparams.top_p = json_value(data, "top_p", default_sparams.top_p);
        slot->sparams.min_p = json_value(data, "min_p", default_sparams.min_p);
        slot->sparams.tfs_z = json_value(data, "tfs_z", default_sparams.tfs_z);
        slot->sparams.typical_p = json_value(data, "typical_p", default_sparams.typical_p);
        slot->sparams.temp = json_value(data, "temperature", default_sparams.temp);
        slot->sparams.dynatemp_range = json_value(data, "dynatemp_range", default_sparams.dynatemp_range);
        slot->sparams.dynatemp_exponent = json_value(data, "dynatemp_exponent", default_sparams.dynatemp_exponent);
        slot->sparams.penalty_last_n = json_value(data, "repeat_last_n", default_sparams.penalty_last_n);
        slot->sparams.penalty_repeat = json_value(data, "repeat_penalty", default_sparams.penalty_repeat);
        slot->sparams.penalty_freq = json_value(data, "frequency_penalty", default_sparams.penalty_freq);
        slot->sparams.penalty_present = json_value(data, "presence_penalty", default_sparams.penalty_present);
        slot->sparams.mirostat = json_value(data, "mirostat", default_sparams.mirostat);
        slot->sparams.mirostat_tau = json_value(data, "mirostat_tau", default_sparams.mirostat_tau);
        slot->sparams.mirostat_eta = json_value(data, "mirostat_eta", default_sparams.mirostat_eta);
        slot->sparams.penalize_nl = json_value(data, "penalize_nl", default_sparams.penalize_nl);
        slot->params.n_keep = json_value(data, "n_keep", slot->params.n_keep);
        slot->sparams.seed = json_value(data, "seed", default_params.seed);
        slot->sparams.grammar = json_value(data, "grammar", default_sparams.grammar);
        slot->sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs);
        slot->sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);

        if (slot->n_predict > 0 && slot->params.n_predict > slot->n_predict) {
            // it might be better to reject the request with a 400 here
            LOG_WARNING("Max tokens to predict exceeds server configuration", {
                {"params.n_predict", slot->params.n_predict},
                {"slot.n_predict", slot->n_predict},
            });
            slot->params.n_predict = slot->n_predict;
        }
        if (data.count("input_suffix") != 0)
        {
            slot->params.input_suffix = data["input_suffix"];
        }
        else
        {
            slot->params.input_suffix = "";
        }

        if (data.count("prompt") != 0)
        {
            slot->prompt = data["prompt"];
        }
        else
        {
            slot->prompt = "";
        }

        slot->sparams.penalty_prompt_tokens.clear();
        slot->sparams.use_penalty_prompt_tokens = false;
        const auto & penalty_prompt = data.find("penalty_prompt");
        if (penalty_prompt != data.end())
        {
            if (penalty_prompt->is_string())
            {
                const auto penalty_prompt_string = penalty_prompt->get<std::string>();
                auto penalty_tokens = llama_tokenize(model, penalty_prompt_string, false);
                slot->sparams.penalty_prompt_tokens.swap(penalty_tokens);
                if (slot->params.n_predict > 0)
                {
                    slot->sparams.penalty_prompt_tokens.reserve(slot->sparams.penalty_prompt_tokens.size() + slot->params.n_predict);
                }
                slot->sparams.use_penalty_prompt_tokens = true;
            }
            else if (penalty_prompt->is_array())
            {
                const auto n_tokens = penalty_prompt->size();
                slot->sparams.penalty_prompt_tokens.reserve(n_tokens + std::max(0, slot->params.n_predict));
                const int n_vocab = llama_n_vocab(model);
                for (const auto & penalty_token : *penalty_prompt)
                {
                    if (penalty_token.is_number_integer())
                    {
                        const auto tok = penalty_token.get<llama_token>();
                        if (tok >= 0 && tok < n_vocab)
                        {
                            slot->sparams.penalty_prompt_tokens.push_back(tok);
                        }
                    }
                }
                slot->sparams.use_penalty_prompt_tokens = true;
            }
        }
        slot->sparams.logit_bias.clear();

        if (json_value(data, "ignore_eos", false))
        {
            slot->sparams.logit_bias[llama_token_eos(model)] = -INFINITY;
        }

        const auto & logit_bias = data.find("logit_bias");
        if (logit_bias != data.end() && logit_bias->is_array())
        {
            const int n_vocab = llama_n_vocab(model);
            for (const auto & el : *logit_bias)
            {
                if (el.is_array() && el.size() == 2)
                {
                    float bias;
                    if (el[1].is_number())
                    {
                        bias = el[1].get<float>();
                    }
                    else if (el[1].is_boolean() && !el[1].get<bool>())
                    {
                        bias = -INFINITY;
                    }
                    else
                    {
                        continue;
                    }

                    if (el[0].is_number_integer())
                    {
                        llama_token tok = el[0].get<llama_token>();
                        if (tok >= 0 && tok < n_vocab)
                        {
                            slot->sparams.logit_bias[tok] = bias;
                        }
                    }
                    else if (el[0].is_string())
                    {
                        auto toks = llama_tokenize(model, el[0].get<std::string>(), false);
                        for (auto tok : toks)
                        {
                            slot->sparams.logit_bias[tok] = bias;
                        }
                    }
                }
            }
        }

        slot->params.antiprompt.clear();

        const auto & stop = data.find("stop");
        if (stop != data.end() && stop->is_array())
        {
            for (const auto & word : *stop)
            {
                if (!word.empty())
                {
                    slot->params.antiprompt.push_back(word);
                }
            }
        }

        const auto & samplers_sequence = data.find("samplers");
        if (samplers_sequence != data.end() && samplers_sequence->is_array())
        {
            std::vector<std::string> sampler_names;
            for (const auto & sampler_name : *samplers_sequence)
            {
                if (sampler_name.is_string())
                {
                    sampler_names.emplace_back(sampler_name);
                }
            }
            slot->sparams.samplers_sequence = llama_sampling_types_from_names(sampler_names, false);
        }
        else
        {
            slot->sparams.samplers_sequence = default_sparams.samplers_sequence;
        }
        if (multimodal)
        {
            const auto & images_data = data.find("image_data");
            if (images_data != data.end() && images_data->is_array())
            {
                for (const auto & img : *images_data)
                {
                    const std::vector<uint8_t> image_buffer = base64_decode(img["data"].get<std::string>());

                    slot_image img_sl;
                    img_sl.id = img.count("id") != 0 ? img["id"].get<int>() : slot->images.size();
                    img_sl.img_data = clip_image_u8_init();
                    if (!clip_image_load_from_bytes(image_buffer.data(), image_buffer.size(), img_sl.img_data))
                    {
                        LOG_ERROR("failed to load image", {
                            {"slot_id", slot->id},
                            {"img_sl_id", img_sl.id}
                        });
                        return false;
                    }
                    LOG_VERBOSE("image loaded", {
                        {"slot_id", slot->id},
                        {"img_sl_id", img_sl.id}
                    });
                    img_sl.request_encode_image = true;
                    slot->images.push_back(img_sl);
                }
                // process prompt
                // example: system prompt [img-102] user [img-103] describe [img-134]
                // -> [{id: 102, prefix: 'system prompt '}, {id: 103, prefix: ' user '}, {id: 134, prefix: ' describe '}]
                if (slot->images.size() > 0 && !slot->prompt.is_array())
                {
                    std::string prompt = slot->prompt.get<std::string>();
                    size_t pos = 0, begin_prefix = 0;
                    std::string pattern = "[img-";
                    while ((pos = prompt.find(pattern, pos)) != std::string::npos) {
                        size_t end_prefix = pos;
                        pos += pattern.length();
                        size_t end_pos = prompt.find(']', pos);
                        if (end_pos != std::string::npos)
                        {
                            std::string image_id = prompt.substr(pos, end_pos - pos);
                            try
                            {
                                int img_id = std::stoi(image_id);
                                bool found = false;
                                for (slot_image & img : slot->images)
                                {
                                    if (img.id == img_id) {
                                        found = true;
                                        img.prefix_prompt = prompt.substr(begin_prefix, end_prefix - begin_prefix);
                                        begin_prefix = end_pos + 1;
                                        break;
                                    }
                                }
                                if (!found) {
                                    LOG_TEE("ERROR: image with id %i not found.\n", img_id);
                                    slot->images.clear();
                                    return false;
                                }
                            } catch (const std::invalid_argument & e) {
                                LOG_TEE("Invalid image id in prompt\n");
                                slot->images.clear();
                                return false;
                            }
                        }
                    }
                    slot->prompt = "";
                    slot->params.input_suffix = prompt.substr(begin_prefix);
                    slot->params.cache_prompt = false; // multimodal doesn't support prompt caching
                }
            }
        }
        if (slot->ctx_sampling != nullptr)
        {
            llama_sampling_free(slot->ctx_sampling);
        }
        slot->ctx_sampling = llama_sampling_init(slot->sparams);
        slot->command = LOAD_PROMPT;

        all_slots_are_idle = false;

        LOG_DEBUG("slot is processing task", {
            {"slot_id", slot->id},
            {"task_id", slot->task_id},
        });

        return true;
    }

    void kv_cache_clear() {
        // clear the entire KV cache
        llama_kv_cache_clear(ctx);
        clean_kv_cache = false;
    }
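
    // Decode the system prompt once into sequence 0 (in n_batch-sized
    // chunks), then copy its KV cache entries to every other parallel
    // sequence with llama_kv_cache_seq_cp, so all slots share the same
    // system prefix without re-evaluating it.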
    void system_prompt_update() {
        kv_cache_clear();
        system_tokens.clear();

        if (!system_prompt.empty()) {
            system_tokens = ::llama_tokenize(ctx, system_prompt, true);

            llama_batch_clear(batch);

            for (int i = 0; i < (int)system_tokens.size(); ++i)
            {
                llama_batch_add(batch, system_tokens[i], i, { 0 }, false);
            }

            for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += params.n_batch)
            {
                const int32_t n_tokens = std::min(params.n_batch, (int32_t) (batch.n_tokens - i));
                llama_batch batch_view = {
                    n_tokens,
                    batch.token + i,
                    nullptr,
                    batch.pos + i,
                    batch.n_seq_id + i,
                    batch.seq_id + i,
                    batch.logits + i,
                    0, 0, 0, // unused
                };
                if (llama_decode(ctx, batch_view) != 0)
                {
                    LOG_TEE("%s: llama_decode() failed\n", __func__);
                    return;
                }
            }

            // assign the system KV cache to all parallel sequences
            for (int32_t i = 1; i < params.n_parallel; ++i)
            {
                llama_kv_cache_seq_cp(ctx, 0, i, 0, system_tokens.size());
            }
        }

        LOG_TEE("system prompt updated\n");
        system_need_update = false;
    }
    void system_prompt_notify() {
        // release all slots
        for (server_slot & slot : slots)
        {
            slot.release();
        }

        system_need_update = true;
    }
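
    // Scan generated text for stop sequences. STOP_FULL looks for a
    // complete stop word near the end of the text (only the region the
    // latest token could have affected); STOP_PARTIAL detects a trailing
    // prefix of a stop word, e.g. text ending in "##" when the stop word
    // is "###", so streaming can hold the text back until the match is
    // resolved.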
    static size_t find_stopping_strings(const std::string & text, const size_t last_token_size,
                                        const stop_type type, server_slot & slot)
    {
        size_t stop_pos = std::string::npos;

        for (const std::string & word : slot.params.antiprompt)
        {
            size_t pos;
            if (type == STOP_FULL)
            {
                const size_t tmp = word.size() + last_token_size;
                const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0;
                pos = text.find(word, from_pos);
            }
            else
            {
                pos = find_partial_stop_string(word, text);
            }
            if (pos != std::string::npos &&
                (stop_pos == std::string::npos || pos < stop_pos))
            {
                if (type == STOP_FULL)
                {
                    slot.stopped_word = true;
                    slot.stopping_word = word;
                    slot.has_next_token = false;
                }
                stop_pos = pos;
            }
        }

        return stop_pos;
    }
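
    // Append a sampled token to the slot, handling three streaming
    // concerns: an incomplete trailing UTF-8 sequence (held back until the
    // remaining continuation bytes arrive), stop-word trimming, and
    // budget/EOS limits. Returns false when generation should stop.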
    bool process_token(completion_token_output & result, server_slot & slot) {
        // remember which tokens were sampled - used for repetition penalties during sampling
        const std::string token_str = llama_token_to_piece(ctx, result.tok);
        slot.sampled = result.tok;

        // search stop word and delete it
        slot.generated_text += token_str;
        slot.has_next_token = true;

        if (slot.ctx_sampling->params.use_penalty_prompt_tokens && result.tok != -1)
        {
            // we can change penalty_prompt_tokens because it is always created from scratch for each request
            slot.ctx_sampling->params.penalty_prompt_tokens.push_back(result.tok);
        }

        // check if there is an incomplete UTF-8 character at the end
        bool incomplete = false;
        for (unsigned i = 1; i < 5 && i <= slot.generated_text.size(); ++i)
        {
            unsigned char c = slot.generated_text[slot.generated_text.size() - i];
            if ((c & 0xC0) == 0x80)
            {
                // continuation byte: 10xxxxxx
                continue;
            }
            if ((c & 0xE0) == 0xC0)
            {
                // 2-byte character: 110xxxxx ...
                incomplete = i < 2;
            }
            else if ((c & 0xF0) == 0xE0)
            {
                // 3-byte character: 1110xxxx ...
                incomplete = i < 3;
            }
            else if ((c & 0xF8) == 0xF0)
            {
                // 4-byte character: 11110xxx ...
                incomplete = i < 4;
            }
            // else 1-byte character or invalid byte
            break;
        }

        if (!incomplete)
        {
            size_t pos = std::min(slot.n_sent_text, slot.generated_text.size());
            const std::string str_test = slot.generated_text.substr(pos);
            bool is_stop_full = false;
            size_t stop_pos = find_stopping_strings(str_test, token_str.size(), STOP_FULL, slot);
            if (stop_pos != std::string::npos)
            {
                is_stop_full = true;
                slot.generated_text.erase(
                    slot.generated_text.begin() + pos + stop_pos,
                    slot.generated_text.end());
                pos = std::min(slot.n_sent_text, slot.generated_text.size());
            }
            else
            {
                is_stop_full = false;
                stop_pos = find_stopping_strings(str_test, token_str.size(), STOP_PARTIAL, slot);
            }

            // check if there is any token to predict
            if (stop_pos == std::string::npos || (!slot.has_next_token && !is_stop_full && stop_pos > 0))
            {
                // do not send the stop word in the response
                result.text_to_send = slot.generated_text.substr(pos, std::string::npos);
                slot.n_sent_text += result.text_to_send.size();
                // add the token to slot queue and cache
            }

            if (slot.params.stream)
            {
                send_partial_response(slot, result);
            }
        }

        slot.add_token_string(result);

        if (incomplete)
        {
            slot.has_next_token = true;
        }

        // check the limits
        if (slot.n_decoded > 0 && slot.has_next_token && !slot.has_budget(params))
        {
            slot.stopped_limit = true;
            slot.has_next_token = false;
        }

        if (!slot.cache_tokens.empty() && llama_token_is_eog(model, result.tok))
        {
            slot.stopped_eos = true;
            slot.has_next_token = false;
            LOG_VERBOSE("eos token found", {});
        }

        LOG_VERBOSE("next token", {
            {"token", result.tok},
            {"token_text", tokens_to_output_formatted_string(ctx, result.tok)},
            {"has_next_token", slot.has_next_token},
            {"n_remain", slot.n_remaining},
            {"num_tokens_predicted", slot.n_decoded},
            {"stopped_eos", slot.stopped_eos},
            {"stopped_word", slot.stopped_word},
            {"stopped_limit", slot.stopped_limit},
            {"stopping_word", slot.stopping_word},
        });

        return slot.has_next_token; // continue
    }
    bool process_images(server_slot & slot) const
    {
        for (slot_image & img : slot.images)
        {
            if (!img.request_encode_image)
            {
                continue;
            }

            if (!llava_image_embed_make_with_clip_img(clp_ctx, params.n_threads, img.img_data, &img.image_embedding, &img.image_tokens)) {
                LOG_TEE("Error processing the given image\n");
                return false;
            }

            img.request_encode_image = false;
        }

        return slot.images.size() > 0;
    }
    void send_error(task_server & task, const std::string & error)
    {
        LOG_TEE("task %i - error: %s\n", task.id, error.c_str());
        task_result res;
        res.id = task.id;
        res.multitask_id = task.multitask_id;
        res.stop = false;
        res.error = true;
        res.result_json = { { "content", error } };
        queue_results.send(res);
    }
    json get_formated_generation(server_slot & slot)
    {
        const auto eos_bias = slot.sparams.logit_bias.find(llama_token_eos(model));
        const bool ignore_eos = eos_bias != slot.sparams.logit_bias.end() &&
                                eos_bias->second < 0.0f && std::isinf(eos_bias->second);
        std::vector<std::string> samplers_sequence;
        for (const auto & sampler_type : slot.sparams.samplers_sequence)
        {
            samplers_sequence.emplace_back(llama_sampling_type_to_str(sampler_type));
        }

        return json {
            {"n_ctx", slot.n_ctx},
            {"n_predict", slot.n_predict},
            {"model", params.model_alias},
            {"seed", slot.params.seed},
            {"temperature", slot.sparams.temp},
            {"dynatemp_range", slot.sparams.dynatemp_range},
            {"dynatemp_exponent", slot.sparams.dynatemp_exponent},
            {"top_k", slot.sparams.top_k},
            {"top_p", slot.sparams.top_p},
            {"min_p", slot.sparams.min_p},
            {"tfs_z", slot.sparams.tfs_z},
            {"typical_p", slot.sparams.typical_p},
            {"repeat_last_n", slot.sparams.penalty_last_n},
            {"repeat_penalty", slot.sparams.penalty_repeat},
            {"presence_penalty", slot.sparams.penalty_present},
            {"frequency_penalty", slot.sparams.penalty_freq},
            {"penalty_prompt_tokens", slot.sparams.penalty_prompt_tokens},
            {"use_penalty_prompt_tokens", slot.sparams.use_penalty_prompt_tokens},
            {"mirostat", slot.sparams.mirostat},
            {"mirostat_tau", slot.sparams.mirostat_tau},
            {"mirostat_eta", slot.sparams.mirostat_eta},
            {"penalize_nl", slot.sparams.penalize_nl},
            {"stop", slot.params.antiprompt},
            {"n_predict", slot.params.n_predict}, // TODO: duplicate "n_predict" key (also emitted above from slot.n_predict)
            {"n_keep", params.n_keep},
            {"ignore_eos", ignore_eos},
            {"stream", slot.params.stream},
            {"logit_bias", slot.sparams.logit_bias},
            {"n_probs", slot.sparams.n_probs},
            {"min_keep", slot.sparams.min_keep},
            {"grammar", slot.sparams.grammar},
            {"samplers", samplers_sequence}
        };
    }
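
    // Streaming protocol: every partial chunk is sent with "stop": false
    // and only the newly generated text; the final message
    // (send_final_response) carries "stop": true plus the aggregate stats
    // and timings. When n_probs is set, each chunk also includes the
    // per-token probabilities for exactly the tokens it covers, tracked
    // via n_sent_token_probs.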
    void send_partial_response(server_slot & slot, completion_token_output tkn)
    {
        task_result res;
        res.id = slot.task_id;
        res.multitask_id = slot.multitask_id;
        res.error = false;
        res.stop = false;

        res.result_json = json
        {
            {"stop", false},
            {"slot_id", slot.id},
            {"multimodal", multimodal}
        };

        if (!llama_token_is_eog(model, tkn.tok)) {
            res.result_json["content"] = tkn.text_to_send;
        }

        if (slot.sparams.n_probs > 0)
        {
            std::vector<completion_token_output> probs_output = {};
            const std::vector<llama_token> to_send_toks = llama_tokenize(ctx, tkn.text_to_send, false);
            size_t probs_pos = std::min(slot.n_sent_token_probs, slot.generated_token_probs.size());
            size_t probs_stop_pos = std::min(slot.n_sent_token_probs + to_send_toks.size(), slot.generated_token_probs.size());
            if (probs_pos < probs_stop_pos)
            {
                probs_output = std::vector<completion_token_output>(slot.generated_token_probs.begin() + probs_pos, slot.generated_token_probs.begin() + probs_stop_pos);
            }
            slot.n_sent_token_probs = probs_stop_pos;
            res.result_json["completion_probabilities"] = probs_vector_to_json(ctx, probs_output);
        }

        queue_results.send(res);
    }
    void send_final_response(server_slot & slot)
    {
        task_result res;
        res.id = slot.task_id;
        res.multitask_id = slot.multitask_id;
        res.error = false;
        res.stop = true;

        res.result_json = json
        {
            {"content", !slot.params.stream ? slot.generated_text : ""},
            {"slot_id", slot.id},
            {"stop", true},
            {"model", params.model_alias},
            {"tokens_predicted", slot.n_decoded},
            {"tokens_evaluated", slot.n_prompt_tokens},
            {"truncated", slot.truncated},
            {"stopped_eos", slot.stopped_eos},
            {"stopped_word", slot.stopped_word},
            {"stopped_limit", slot.stopped_limit},
            {"stopping_word", slot.stopping_word},
            {"tokens_cached", slot.n_past},
            {"timings", slot.get_formated_timings()}
        };

        if (slot.sparams.n_probs > 0)
        {
            std::vector<completion_token_output> probs = {};
            if (!slot.params.stream && slot.stopped_word)
            {
                const std::vector<llama_token> stop_word_toks = llama_tokenize(ctx, slot.stopping_word, false);
                probs = std::vector<completion_token_output>(slot.generated_token_probs.begin(), slot.generated_token_probs.end() - stop_word_toks.size());
            }
            else
            {
                probs = std::vector<completion_token_output>(
                    slot.generated_token_probs.begin(),
                    slot.generated_token_probs.end());
            }
            res.result_json["completion_probabilities"] = probs_vector_to_json(ctx, probs);
        }

        queue_results.send(res);
    }
    void send_embedding(server_slot & slot, const llama_batch & batch)
    {
        task_result res;
        res.id = slot.task_id;
        res.multitask_id = slot.multitask_id;
        res.error = false;
        res.stop = true;

        const int n_embd = llama_n_embd(model);

        if (!params.embedding)
        {
            LOG_WARNING("embedding disabled", {{"params.embedding", params.embedding}});
            res.result_json = json
            {
                {"embedding", std::vector<float>(n_embd, 0.0f)},
            };
        }
        else
        {
            for (int i = 0; i < batch.n_tokens; ++i) {
                if (!batch.logits[i] || batch.seq_id[i][0] != slot.id) {
                    continue;
                }

                const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
                if (embd == NULL) {
                    embd = llama_get_embeddings_ith(ctx, i);
                    if (embd == NULL) {
                        LOG_ERROR("failed to get embeddings for token", {{"token", batch.token[i]}, {"seq_id", batch.seq_id[i][0]}});
                        res.result_json = json
                        {
                            {"embedding", std::vector<float>(n_embd, 0.0f)},
                        };
                        continue;
                    }
                }

                res.result_json = json
                {
                    {"embedding", std::vector<float>(embd, embd + n_embd)},
                };
            }
        }

        queue_results.send(res);
    }
    void request_completion(int task_id, json data, bool embedding, int multitask_id)
    {
        task_server task;
        task.id = task_id;
        task.target_id = 0;
        task.data = std::move(data);
        task.embedding_mode = embedding;
        task.type = TASK_TYPE_COMPLETION;
        task.multitask_id = multitask_id;

        // when a completion task's prompt array is not a singleton, we split it into multiple requests
        // otherwise, it's a single-prompt task, and we queue it directly
        // if there are numbers in the prompt array, it is treated as an array of tokens
        if (task.data.count("prompt") != 0 && task.data.at("prompt").size() > 1) {
            bool numbers = false;
            for (const auto & e : task.data.at("prompt")) {
                if (e.is_number()) {
                    numbers = true;
                    break;
                }
            }

            // NOTE: split_multiprompt_task() does not handle a mix of strings and numbers,
            // it will completely stall the server. The root cause of that bug is unknown.
            //
            // if there are numbers, the prompt needs to be treated as a single prompt;
            // queue_tasks handles a mix of strings and numbers just fine.
            if (numbers) {
                queue_tasks.post(task);
            } else {
                split_multiprompt_task(task_id, task);
            }
        } else {
            // an empty prompt can make the slot misbehave
            if (task.data.contains("prompt") && task.data["prompt"].is_string() && task.data["prompt"].get<std::string>().empty()) {
                task.data["prompt"] = " "; // add a space so that we have at least one token
            }
            queue_tasks.post(task);
        }
    }
    // for multiple-image processing
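    // Interleaves text and image data in the llama context: for each image,
    // (1) decode the pending text batch (the prefix before the image),
    // (2) feed the precomputed CLIP embedding in n_batch-sized chunks
    //     through a token-less llama_batch whose embd pointer is set,
    // (3) queue the next prefix (or the suffix after the last image) as
    //     text tokens for the following iteration.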
    bool ingest_images(server_slot & slot, int n_batch)
    {
        int image_idx = 0;

        while (image_idx < (int) slot.images.size())
        {
            slot_image & img = slot.images[image_idx];

            // process prefix prompt
            for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch)
            {
                const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i));
                llama_batch batch_view = {
                    n_tokens,
                    batch.token + i,
                    nullptr,
                    batch.pos + i,
                    batch.n_seq_id + i,
                    batch.seq_id + i,
                    batch.logits + i,
                    0, 0, 0, // unused
                };
                if (llama_decode(ctx, batch_view))
                {
                    LOG_TEE("%s : failed to eval\n", __func__);
                    return false;
                }
            }

            // process image with llm
            for (int i = 0; i < img.image_tokens; i += n_batch)
            {
                int n_eval = img.image_tokens - i;
                if (n_eval > n_batch)
                {
                    n_eval = n_batch;
                }

                const int n_embd = llama_n_embd(model);
                llama_batch batch_img = {
                    n_eval,
                    nullptr,
                    (img.image_embedding + i * n_embd),
                    nullptr,
                    nullptr,
                    nullptr,
                    nullptr,
                    slot.n_past,
                    1, 0
                };
                if (llama_decode(ctx, batch_img))
                {
                    LOG_TEE("%s : failed to eval image\n", __func__);
                    return false;
                }
                slot.n_past += n_eval;
            }
            image_idx++;

            llama_batch_clear(batch);

            // append prefix of next image
            const auto json_prompt = (image_idx >= (int) slot.images.size()) ?
                slot.params.input_suffix : // no more images, then process suffix prompt
                (json)(slot.images[image_idx].prefix_prompt);

            std::vector<llama_token> append_tokens = tokenize(json_prompt, false); // has next image
            for (int i = 0; i < (int) append_tokens.size(); ++i)
            {
                llama_batch_add(batch, append_tokens[i], system_tokens.size() + slot.n_past, { slot.id }, true);
                slot.n_past += 1;
            }
        }

        return true;
    }
    void request_cancel(int task_id)
    {
        task_server task;
        task.type = TASK_TYPE_CANCEL;
        task.target_id = task_id;
        queue_tasks.post(task);
    }

    void split_multiprompt_task(int multitask_id, task_server & multiprompt_task)
    {
        int prompt_count = multiprompt_task.data.at("prompt").size();
        if (prompt_count <= 1) {
            send_error(multiprompt_task, "error while handling multiple prompts");
            return;
        }

        // generate IDs for all subtasks
        std::vector<int> subtask_ids(prompt_count);
        for (int i = 0; i < prompt_count; i++)
        {
            subtask_ids[i] = queue_tasks.get_new_id();
        }

        // queue up the multitask so we can track its subtask progression
        queue_tasks.add_multitask(multitask_id, subtask_ids);

        // add subtasks
        for (int i = 0; i < prompt_count; i++)
        {
            json subtask_data = multiprompt_task.data;
            subtask_data["prompt"] = subtask_data["prompt"][i];

            // subtasks inherit everything else (embedding mode, etc.)
            request_completion(subtask_ids[i], subtask_data, multiprompt_task.embedding_mode, multitask_id);
        }
    }
    std::string common_prefix(const std::string & str1, const std::string & str2) {
        // use the four-iterator overload so that str1 may be longer than str2
        auto mismatch_pair = std::mismatch(str1.begin(), str1.end(), str2.begin(), str2.end());
        return std::string(str1.begin(), mismatch_pair.first);
    }
    // Find the slot that has the greatest common prefix
    server_slot * prefix_slot(const json & prompt) {
        if (!prompt.is_string()) {
            return nullptr;
        }

        std::string prompt_str = prompt.get<std::string>();
        server_slot * slot = nullptr;
        size_t longest = 0;

        for (server_slot & s : slots) {
            if (s.available() && s.prompt.is_string()) {
                std::string s_prompt = s.prompt.get<std::string>();
                std::string prefix = common_prefix(s_prompt, prompt_str);

                if (prefix.size() > longest) {
                    slot = &s;
                    longest = prefix.size();
                }
            }
        }

        if (!slot) {
            return get_slot(-1);
        }

        LOG_DEBUG("slot with common prefix found", {
            {"slot_id", slot->id},
            {"characters", longest}
        });
        return slot;
    }
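
    // Dispatch a task from the queue. COMPLETION binds the task to a slot
    // (preferring one whose cached prompt shares the longest prefix),
    // CANCEL releases the matching slot, NEXT_RESPONSE is a wake-up no-op,
    // and METRICS snapshots per-slot state plus the aggregate counters.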
    void process_single_task(task_server & task)
    {
        switch (task.type)
        {
            case TASK_TYPE_COMPLETION: {
                server_slot * slot = prefix_slot(task.data["prompt"]);
                if (slot == nullptr)
                {
                    // if no slot is available, we defer this task for processing later
                    LOG_VERBOSE("no slot is available", {{"task_id", task.id}});
                    queue_tasks.defer(task);
                    break;
                }

                slot->reset();

                slot->embedding = task.embedding_mode;
                slot->task_id = task.id;
                slot->multitask_id = task.multitask_id;

                if (!launch_slot_with_data(slot, task.data))
                {
                    // send error result
                    send_error(task, "internal_error");
                    break;
                }
            } break;
            case TASK_TYPE_CANCEL: { // release slot linked with the task id
                for (auto & slot : slots)
                {
                    if (slot.task_id == task.target_id)
                    {
                        slot.release();
                        break;
                    }
                }
            } break;
            case TASK_TYPE_NEXT_RESPONSE: {
                // do nothing
            } break;
            case TASK_TYPE_METRICS: {
                json slots_data = json::array();
                int n_idle_slots = 0;
                int n_processing_slots = 0;

                for (server_slot & slot : slots) {
                    json slot_data = get_formated_generation(slot);
                    slot_data["id"] = slot.id;
                    slot_data["task_id"] = slot.task_id;
                    slot_data["state"] = slot.state;
                    slot_data["prompt"] = slot.prompt;
                    slot_data["next_token"] = {
                        {"has_next_token", slot.has_next_token},
                        {"n_remain", slot.n_remaining},
                        {"num_tokens_predicted", slot.n_decoded},
                        {"stopped_eos", slot.stopped_eos},
                        {"stopped_word", slot.stopped_word},
                        {"stopped_limit", slot.stopped_limit},
                        {"stopping_word", slot.stopping_word},
                    };
                    if (slot_data["state"] == IDLE) {
                        n_idle_slots++;
                    } else {
                        n_processing_slots++;
                    }
                    slots_data.push_back(slot_data);
                }
                LOG_DEBUG("slot data", {
                    {"task_id", task.id},
                    {"n_idle_slots", n_idle_slots},
                    {"n_processing_slots", n_processing_slots}
                });
                LOG_VERBOSE("slot data", {
                    {"task_id", task.id},
                    {"n_idle_slots", n_idle_slots},
                    {"n_processing_slots", n_processing_slots},
                    {"slots", slots_data}
                });

                task_result res;
                res.id = task.id;
                res.multitask_id = task.multitask_id;
                res.stop = true;
                res.error = false;
                res.result_json = {
                    { "idle", n_idle_slots },
                    { "processing", n_processing_slots },
                    { "deferred", queue_tasks.queue_tasks_deferred.size() },
                    { "n_prompt_tokens_processed_total", metrics.n_prompt_tokens_processed_total },
                    { "n_tokens_predicted_total", metrics.n_tokens_predicted_total },
                    { "n_prompt_tokens_processed", metrics.n_prompt_tokens_processed },
                    { "t_prompt_processing", metrics.t_prompt_processing },
                    { "n_tokens_predicted", metrics.n_tokens_predicted },
                    { "t_tokens_generation", metrics.t_tokens_generation },
                    { "kv_cache_tokens_count", llama_get_kv_cache_token_count(ctx) },
                    { "kv_cache_used_cells", llama_get_kv_cache_used_cells(ctx) },
                    { "slots", slots_data },
                };
                metrics.reset_bucket();
                queue_results.send(res);
            } break;
        }
    }
    void on_finish_multitask(task_multi& multitask)
    {
        // all subtasks done == multitask is done
        task_result result;
        result.id    = multitask.id;
        result.stop  = true;
        result.error = false;

        // collect json results into one json result
        std::vector<json> result_jsons;
        for (auto& subres : multitask.results)
        {
            result_jsons.push_back(subres.result_json);
            // propagate an error from any subtask (with &&, error would always stay false)
            result.error = result.error || subres.error;
        }
        result.result_json = json{ { "results", result_jsons } };
        queue_results.send(result);
    }
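
    // Main inference loop body, invoked repeatedly by the task queue. Rough
    // order of operations:
    //   1. apply a pending system prompt update
    //   2. shift context for slots that hit n_ctx (only when ga_n == 1)
    //   3. add one sampled token per generating slot to the batch
    //   4. assign queued prompts to idle slots (continuous batching)
    //   5. llama_decode() the batch in n_batch-sized views, then sample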
    bool update_slots() {
        if (system_need_update)
        {
            LOG_DEBUG("updating system prompt", {});
            system_prompt_update();
        }

        llama_batch_clear(batch);

        if (all_slots_are_idle)
        {
            if (system_prompt.empty() && clean_kv_cache)
            {
                LOG_DEBUG("all slots are idle and system prompt is empty, clear the KV cache", {});
                kv_cache_clear();
            }
            return true;
        }

        LOG_VERBOSE("posting NEXT_RESPONSE", {});
        task_server task;
        task.type      = TASK_TYPE_NEXT_RESPONSE;
        task.target_id = -1;
        queue_tasks.post(task);

        for (server_slot &slot : slots)
        {
            if (slot.ga_n == 1)
            {
                if (slot.is_processing() && system_tokens.size() + slot.cache_tokens.size() >= (size_t) slot.n_ctx)
                {
                    // Shift context
                    const int n_keep    = slot.params.n_keep + add_bos_token;
                    const int n_left    = (int) system_tokens.size() + slot.n_past - n_keep;
                    const int n_discard = n_left / 2;
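
                    // e.g. (values illustrative) n_ctx = 2048, n_keep = 256,
                    // n_past = 2048, no system tokens: n_left = 1792 and
                    // n_discard = 896, so positions [256, 1152) are evicted and
                    // [1152, 2048) slides down by 896 before generation resumes.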
                    LOG_DEBUG("slot context shift", {
                        {"slot_id",         slot.id},
                        {"task_id",         slot.task_id},
                        {"n_keep",          n_keep},
                        {"n_left",          n_left},
                        {"n_discard",       n_discard},
                        {"n_ctx",           n_ctx},
                        {"n_past",          slot.n_past},
                        {"n_system_tokens", system_tokens.size()},
                        {"n_cache_tokens",  slot.cache_tokens.size()}
                    });
                    llama_kv_cache_seq_rm (ctx, slot.id, n_keep            , n_keep + n_discard);
                    llama_kv_cache_seq_add(ctx, slot.id, n_keep + n_discard, system_tokens.size() + slot.n_past, -n_discard);

                    for (size_t i = n_keep + n_discard; i < slot.cache_tokens.size(); i++)
                    {
                        slot.cache_tokens[i - n_discard] = slot.cache_tokens[i];
                    }

                    slot.cache_tokens.resize(slot.cache_tokens.size() - n_discard);

                    slot.n_past -= n_discard;

                    slot.truncated = true;
                }
            }
        }

        // decode any currently ongoing sequences
        LOG_VERBOSE("decoding ongoing sequences", {});
        for (auto & slot : slots)
        {
            // release the slot
            if (slot.command == RELEASE)
            {
                slot.state = IDLE;
                slot.command = NONE;
                slot.t_last_used = ggml_time_us();

                LOG_DEBUG("slot released", {
                    {"slot_id",         slot.id},
                    {"task_id",         slot.task_id},
                    {"n_ctx",           n_ctx},
                    {"n_past",          slot.n_past},
                    {"n_system_tokens", system_tokens.size()},
                    {"n_cache_tokens",  slot.cache_tokens.size()},
                    {"truncated",       slot.truncated}
                });
                queue_tasks.notify_slot_changed();

                continue;
            }

            if (slot.state == IDLE)
            {
                continue;
            }

            slot.i_batch = batch.n_tokens;

            const int32_t slot_npast = slot.n_past_se > 0 ? slot.n_past_se : slot.n_past;

            // TODO: we always have to take into account the "system_tokens"
            //       this is not great and needs to be improved somehow
            llama_batch_add(batch, slot.sampled, system_tokens.size() + slot_npast, { slot.id }, true);
            slot.n_past += 1;
        }

        // process in chunks of params.n_batch
        int32_t n_batch = params.n_batch;

        // assign workload to the slots
        if (params.cont_batching || batch.n_tokens == 0)
        {
            for (auto & slot : slots)
            {
                const bool has_prompt = slot.prompt.is_array() || (slot.prompt.is_string() && !slot.prompt.get<std::string>().empty()) || !slot.images.empty();

                // empty prompt passed -> release the slot and send empty response
                if (slot.state == IDLE && slot.command == LOAD_PROMPT && !has_prompt)
                {
                    slot.release();
                    slot.print_timings();
                    send_final_response(slot);
                    continue;
                }

                // we need to process the prompt
                if (slot.state == IDLE && slot.command == LOAD_PROMPT)
                {
                    slot.state = PROCESSING;
                    slot.command = NONE;
                    std::vector<llama_token> prompt_tokens;
                    slot.t_start_process_prompt = ggml_time_us();
                    slot.t_start_genereration = 0;

                    prompt_tokens = tokenize(slot.prompt, system_prompt.empty());  // add BOS if there isn't system prompt

                    slot.n_prompt_tokens = prompt_tokens.size();

                    if (slot.params.n_keep < 0)
                    {
                        slot.params.n_keep = slot.n_prompt_tokens;
                    }
                    slot.params.n_keep = std::min(slot.n_ctx - 4, slot.params.n_keep);

                    // if input prompt is too big, truncate it, if group attention self-extend is disabled
                    if (slot.ga_n == 1 && slot.n_prompt_tokens >= slot.n_ctx)
                    {
                        const int n_left  = slot.n_ctx - slot.params.n_keep;
                        const int n_shift = n_left / 2;
                        const int n_erase = slot.n_prompt_tokens - slot.params.n_keep - n_shift;
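
                        // e.g. (values illustrative) n_ctx = 512, n_keep = 128,
                        // n_prompt_tokens = 1000: n_left = 384, n_shift = 192,
                        // n_erase = 680 -> keep the first 128 tokens plus the
                        // last 192, leaving 320 tokens (< n_ctx) to evaluate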
                        std::vector<llama_token> new_tokens(
                            prompt_tokens.begin(),
                            prompt_tokens.begin() + slot.params.n_keep);
                        new_tokens.insert(
                            new_tokens.end(),
                            prompt_tokens.begin() + slot.params.n_keep + n_erase,
                            prompt_tokens.end());

                        LOG_INFO("input truncated", {
                            {"n_ctx",   slot.n_ctx},
                            {"n_keep",  slot.params.n_keep},
                            {"n_left",  n_left},
                            {"n_shift", n_shift},
                            {"n_erase", n_erase},
                        });

                        slot.truncated = true;
                        prompt_tokens = new_tokens;

                        slot.n_prompt_tokens = prompt_tokens.size();
                        GGML_ASSERT(slot.n_prompt_tokens < slot.n_ctx);
                    }
                    if (!slot.params.cache_prompt)
                    {
                        llama_sampling_reset(slot.ctx_sampling);

                        slot.n_past    = 0;
                        slot.n_past_se = 0;
                        slot.ga_i      = 0;
                        slot.n_prompt_tokens_processed = slot.n_prompt_tokens;
                    }
                    else
                    {
                        // push the prompt into the sampling context (do not apply grammar)
                        for (auto &token : prompt_tokens)
                        {
                            llama_sampling_accept(slot.ctx_sampling, ctx, token, false);
                        }

                        slot.n_past = common_part(slot.cache_tokens, prompt_tokens);

                        // the last token of the cache is not in the KV cache until the next call to llama_decode
                        // (it was sampled, pushed into the "cache_tokens", but not yet put in the context)
                        if (slot.n_past > 0 && slot.n_past == (int32_t) slot.cache_tokens.size())
                        {
                            slot.n_past -= 1;
                        }

                        slot.n_prompt_tokens_processed = slot.n_prompt_tokens;

                        if (slot.ga_n != 1)
                        {
                            int ga_i = 0;
                            int32_t ga_n = slot.ga_n;
                            int32_t ga_w = slot.ga_w;
                            int32_t slot_npast = 0;
                            for (int k = 0; k < slot.n_past; ++k)
                            {
                                while (slot_npast >= ga_i + ga_w) {
                                    const int bd = (ga_w/ga_n)*(ga_n - 1);
                                    slot_npast -= bd;
                                    ga_i += ga_w/ga_n;
                                }
                                slot_npast++;
                            }
                            slot.n_past_se = slot_npast;
                            slot.ga_i = ga_i;
                        }

                        LOG_DEBUG("slot progression", {
                            { "slot_id",   slot.id },
                            { "task_id",   slot.task_id },
                            { "n_past",    slot.n_past },
                            { "n_past_se", slot.n_past_se },
                            { "ga_i",      slot.ga_i },
                            { "n_prompt_tokens_processed", slot.n_prompt_tokens_processed }
                        });
                    }

                    slot.cache_tokens = prompt_tokens;

                    if (slot.n_past == slot.n_prompt_tokens && slot.n_past > 0)
                    {
                        // we have to evaluate at least 1 token to generate logits.
                        LOG_DEBUG("we have to evaluate at least 1 token to generate logits", {
                            { "slot_id", slot.id },
                            { "task_id", slot.task_id }
                        });
                        slot.n_past--;
                        if (slot.ga_i > 0)
                        {
                            slot.n_past_se--;
                        }
                    }

                    int p0 = (int) system_tokens.size() + slot.n_past;
                    LOG_DEBUG("kv cache rm [p0, end)", {
                        { "slot_id", slot.id },
                        { "task_id", slot.task_id },
                        { "p0",      p0 }
                    });
                    llama_kv_cache_seq_rm(ctx, slot.id, p0, -1);

                    LOG_VERBOSE("prompt ingested", {
                        {"n_past",  slot.n_past},
                        {"cached",  tokens_to_str(ctx, slot.cache_tokens.cbegin(), slot.cache_tokens.cbegin() + slot.n_past)},
                        {"to_eval", tokens_to_str(ctx, slot.cache_tokens.cbegin() + slot.n_past, slot.cache_tokens.cend())},
                    });

                    const bool has_images = process_images(slot);

                    // process the prefix of first image
                    std::vector<llama_token> prefix_tokens = has_images ? tokenize(slot.images[0].prefix_prompt, add_bos_token) : prompt_tokens;

                    int32_t slot_npast = slot.n_past_se > 0 ? slot.n_past_se : slot.n_past;

                    int32_t ga_i = slot.ga_i;
                    int32_t ga_n = slot.ga_n;
                    int32_t ga_w = slot.ga_w;

                    for (; slot.n_past < (int) prefix_tokens.size(); ++slot.n_past)
                    {
                        if (slot.ga_n != 1)
                        {
                            while (slot_npast >= ga_i + ga_w) {
                                const int bd = (ga_w/ga_n)*(ga_n - 1);
                                slot_npast -= bd;
                                ga_i += ga_w/ga_n;
                            }
                        }
                        llama_batch_add(batch, prefix_tokens[slot.n_past], system_tokens.size() + slot_npast, { slot.id }, false);
                        slot_npast++;
                    }

                    if (has_images && !ingest_images(slot, n_batch))
                    {
                        LOG_ERROR("failed processing images", {
                            {"slot_id", slot.id},
                            {"task_id", slot.task_id},
                        });
                        // FIXME @phymbert: to be properly tested
                        //  early returning without changing the slot state will block the slot forever
                        //  no one at the moment is checking the return value
                        return false;
                    }

                    // extract the logits only for the last token
                    if (batch.n_tokens > 0)
                    {
                        batch.logits[batch.n_tokens - 1] = true;
                    }

                    slot.n_decoded = 0;
                    slot.i_batch   = batch.n_tokens - 1;
                }
            }
        }

        if (batch.n_tokens == 0)
        {
            all_slots_are_idle = true;
            return true;
        }

        for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch)
        {
            const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i);

            for (auto & slot : slots)
            {
                if (slot.ga_n != 1)
                {
                    // context extension via Self-Extend
                    while (slot.n_past_se >= slot.ga_i + slot.ga_w)
                    {
                        const int ib = (slot.ga_n * slot.ga_i) / slot.ga_w;
                        const int bd = (slot.ga_w / slot.ga_n) * (slot.ga_n - 1);
                        const int dd = (slot.ga_w / slot.ga_n) - ib * bd - slot.ga_w;
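
                        // e.g. (values illustrative) ga_n = 4, ga_w = 512,
                        // ga_i = 0, n_past_se = 512: ib = 0, bd = 384, dd = -384;
                        // positions [0, 512) are divided down to [0, 128),
                        // n_past_se drops to 128 and ga_i advances to 128.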
                        LOG_TEE("\n");
                        LOG_TEE("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i, slot.n_past_se, ib * bd, slot.ga_i + ib * bd, slot.n_past_se + ib * bd);
                        LOG_TEE("div:   [%6d, %6d] / %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w, slot.ga_n, (slot.ga_i + ib * bd) / slot.ga_n, (slot.ga_i + ib * bd + slot.ga_w) / slot.ga_n);
                        LOG_TEE("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd + slot.ga_w, slot.n_past_se + ib * bd, dd, slot.ga_i + ib * bd + slot.ga_w + dd, slot.n_past_se + ib * bd + dd);

                        llama_kv_cache_seq_add(ctx, slot.id, slot.ga_i, slot.n_past_se, ib * bd);
                        llama_kv_cache_seq_div(ctx, slot.id, slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w, slot.ga_n);
                        llama_kv_cache_seq_add(ctx, slot.id, slot.ga_i + ib * bd + slot.ga_w, slot.n_past_se + ib * bd, dd);

                        slot.n_past_se -= bd;

                        slot.ga_i += slot.ga_w / slot.ga_n;

                        LOG_TEE("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", slot.n_past_se + bd, slot.n_past_se, slot.ga_i);
                    }
                    slot.n_past_se += n_tokens;
                }
            }

            llama_batch batch_view =
            {
                n_tokens,
                batch.token    + i,
                nullptr,
                batch.pos      + i,
                batch.n_seq_id + i,
                batch.seq_id   + i,
                batch.logits   + i,
                0, 0, 0, // unused
            };

            const int ret = llama_decode(ctx, batch_view);

            if (ret != 0)
            {
                if (n_batch == 1 || ret < 0)
                {
                    // if you get here, it means the KV cache is full - try increasing it via the context size
                    LOG_TEE("%s : failed to decode the batch, n_batch = %d, ret = %d\n", __func__, n_batch, ret);
                    return false;
                }

                LOG_TEE("%s : failed to find free space in the KV cache, retrying with smaller n_batch = %d\n", __func__, n_batch / 2);

                // retry with half the batch size to try to find a free slot in the KV cache
                n_batch /= 2;
                i -= n_batch;
                continue;
            }

            for (auto & slot : slots)
            {
                if (slot.i_batch < (int) i || slot.i_batch >= (int) (i + n_tokens))
                {
                    continue;
                }

                // prompt evaluated for embedding
                if (slot.embedding)
                {
                    send_embedding(slot, batch_view);
                    slot.release();
                    slot.i_batch = -1;
                    continue;
                }

                completion_token_output result;
                const llama_token id = llama_sampling_sample(slot.ctx_sampling, ctx, NULL, slot.i_batch - i);

                llama_sampling_accept(slot.ctx_sampling, ctx, id, true);

                slot.n_decoded += 1;
                if (slot.n_decoded == 1)
                {
                    slot.t_start_genereration = ggml_time_us();
                    slot.t_prompt_processing = (slot.t_start_genereration - slot.t_start_process_prompt) / 1e3;
                    metrics.on_prompt_eval(slot);
                }

                llama_token_data_array cur_p = { slot.ctx_sampling->cur.data(), slot.ctx_sampling->cur.size(), false };
                result.tok = id;

                const int32_t n_probs = slot.sparams.n_probs;
                if (slot.sparams.temp <= 0 && n_probs > 0)
                {
                    // for llama_sample_token_greedy we need to sort candidates
                    llama_sample_softmax(ctx, &cur_p);
                }

                for (size_t i = 0; i < std::min(cur_p.size, (size_t)n_probs); ++i)
                {
                    result.probs.push_back({cur_p.data[i].id, cur_p.data[i].p});
                }

                if (!process_token(result, slot))
                {
                    slot.release();
                    slot.print_timings();
                    send_final_response(slot);
                    metrics.on_prediction(slot);
                }

                slot.i_batch = -1;
            }
        }

        LOG_VERBOSE("slots updated", {});
        return true;
    }
    json model_meta() {
        return json{
            {"vocab_type",  llama_vocab_type(model)},
            {"n_vocab",     llama_n_vocab(model)},
            {"n_ctx_train", llama_n_ctx_train(model)},
            {"n_embd",      llama_n_embd(model)},
            {"n_params",    llama_model_n_params(model)},
            {"size",        llama_model_size(model)},
        };
    }
};
static void server_print_usage(const char *argv0, const gpt_params &params,
                               const server_params &sparams)
{
    printf("usage: %s [options]\n", argv0);
    printf("\n");
    printf("options:\n");
    printf("  -h, --help                show this help message and exit\n");
    printf("  -v, --verbose             verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled");
    printf("  -t N, --threads N         number of threads to use during computation (default: %d)\n", params.n_threads);
    printf("  -tb N, --threads-batch N  number of threads to use during batch and prompt processing (default: same as --threads)\n");
    printf("  --threads-http N          number of threads in the http server pool to process requests (default: max(hardware concurrency - 1, --parallel N + 2))\n");
    printf("  -c N, --ctx-size N        size of the prompt context (default: %d)\n", params.n_ctx);
    printf("  --rope-scaling {none,linear,yarn}\n");
    printf("                            RoPE frequency scaling method, defaults to linear unless specified by the model\n");
    printf("  --rope-freq-base N        RoPE base frequency (default: loaded from model)\n");
    printf("  --rope-freq-scale N       RoPE frequency scaling factor, expands context by a factor of 1/N\n");
    printf("  --yarn-ext-factor N       YaRN: extrapolation mix factor (default: 1.0, 0.0 = full interpolation)\n");
    printf("  --yarn-attn-factor N      YaRN: scale sqrt(t) or attention magnitude (default: 1.0)\n");
    printf("  --yarn-beta-slow N        YaRN: high correction dim or alpha (default: %.1f)\n", params.yarn_beta_slow);
    printf("  --yarn-beta-fast N        YaRN: low correction dim or beta (default: %.1f)\n", params.yarn_beta_fast);
    printf("  --pooling {none,mean,cls}\n");
    printf("                            pooling type for embeddings, use model default if unspecified\n");
    printf("  -b N, --batch-size N      batch size for prompt processing (default: %d)\n", params.n_batch);
    printf("  --memory-f32              use f32 instead of f16 for memory key+value (default: disabled)\n");
    printf("                            not recommended: doubles context memory required and no measurable increase in quality\n");
    if (llama_supports_mlock())
    {
        printf("  --mlock                   force system to keep model in RAM rather than swapping or compressing\n");
    }
    if (llama_supports_mmap())
    {
        printf("  --no-mmap                 do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
    }
    printf("  --numa TYPE               attempt optimizations that help on some NUMA systems\n");
    printf("                              - distribute: spread execution evenly over all nodes\n");
    printf("                              - isolate: only spawn threads on CPUs on the node that execution started on\n");
    printf("                              - numactl: use the CPU map provided by numactl\n");
    if (llama_supports_gpu_offload()) {
        printf("  -ngl N, --n-gpu-layers N\n");
        printf("                            number of layers to store in VRAM\n");
        printf("  -sm SPLIT_MODE, --split-mode SPLIT_MODE\n");
        printf("                            how to split the model across multiple GPUs, one of:\n");
        printf("                              - none: use one GPU only\n");
        printf("                              - layer (default): split layers and KV across GPUs\n");
        printf("                              - row: split rows across GPUs\n");
        printf("  -ts SPLIT, --tensor-split SPLIT\n");
        printf("                            fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1\n");
        printf("  -mg i, --main-gpu i       the GPU to use for the model (with split-mode = none),\n");
        printf("                            or for intermediate results and KV (with split-mode = row)\n");
    }
    printf("  -m FNAME, --model FNAME\n");
    printf("                            model path (default: %s)\n", params.model.c_str());
    printf("  -a ALIAS, --alias ALIAS\n");
    printf("                            set an alias for the model, will be added as `model` field in completion response\n");
    printf("  --lora FNAME              apply LoRA adapter (implies --no-mmap)\n");
    printf("  --lora-base FNAME         optional model to use as a base for the layers modified by the LoRA adapter\n");
    printf("  --host                    ip address to listen (default: %s)\n", sparams.hostname.c_str());
    printf("  --port PORT               port to listen (default: %d)\n", sparams.port);
    printf("  --path PUBLIC_PATH        path from which to serve static files (default: %s)\n", sparams.public_path.c_str());
    printf("  --api-key API_KEY         optional api key to enhance server security. If set, requests must include this key for access.\n");
    printf("  --api-key-file FNAME      path to file containing api keys delimited by new lines. If set, requests must include one of the keys for access.\n");
    printf("  -to N, --timeout N        server read/write timeout in seconds (default: %d)\n", sparams.read_timeout);
    printf("  --embedding               enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
    printf("  -np N, --parallel N       number of slots for processing requests (default: %d)\n", params.n_parallel);
    printf("  -cb, --cont-batching      enable continuous batching (a.k.a. dynamic batching) (default: disabled)\n");
    printf("  -fa, --flash-attn         enable Flash Attention (default: %s)\n", params.flash_attn ? "enabled" : "disabled");
    printf("  -spf FNAME, --system-prompt-file FNAME\n");
    printf("                            set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n");
    printf("  -ctk TYPE, --cache-type-k TYPE\n");
    printf("                            KV cache data type for K (default: f16)\n");
    printf("  -ctv TYPE, --cache-type-v TYPE\n");
    printf("                            KV cache data type for V (default: f16)\n");
    printf("  --mmproj MMPROJ_FILE      path to a multimodal projector file for LLaVA.\n");
    printf("  --log-format              log output format: json or text (default: json)\n");
    printf("  --log-disable             disables logging to a file.\n");
    printf("  --slots-endpoint-disable  disables slots monitoring endpoint.\n");
    printf("  --metrics                 enable prometheus compatible metrics endpoint (default: %s).\n", sparams.metrics_endpoint ? "enabled" : "disabled");
    printf("\n");
    printf("  -n, --n-predict           maximum tokens to predict (default: %d)\n", params.n_predict);
    printf("  --override-kv KEY=TYPE:VALUE\n");
    printf("                            advanced option to override model metadata by key. may be specified multiple times.\n");
    printf("                            types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
    printf("  -gan N, --grp-attn-n N    set the group attention factor to extend context size through self-extend (default: 1 = disabled), used together with group attention width `--grp-attn-w`\n");
    printf("  -gaw N, --grp-attn-w N    set the group attention width to extend context size through self-extend (default: 512), used together with group attention factor `--grp-attn-n`\n");
    printf("  --chat-template JINJA_TEMPLATE\n");
    printf("                            set custom jinja chat template (default: template taken from model's metadata)\n");
    printf("                            Note: only commonly used templates are accepted, since we don't have jinja parser\n");
    printf("\n");
}
static void server_params_parse(int argc, char **argv, server_params &sparams, gpt_params &params)
{
    gpt_params    default_params;
    server_params default_sparams;

    std::string arg;
    bool invalid_param = false;

    for (int i = 1; i < argc; i++)
    {
        arg = argv[i];
        if (arg == "--port")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.port = std::stoi(argv[i]);
        }
        else if (arg == "--host")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.hostname = argv[i];
        }
        else if (arg == "--path")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.public_path = argv[i];
        }
        else if (arg == "--api-key")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.api_keys.emplace_back(argv[i]);
        }
        else if (arg == "--api-key-file")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            std::ifstream key_file(argv[i]);
            if (!key_file) {
                fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
                invalid_param = true;
                break;
            }
            std::string key;
            while (std::getline(key_file, key)) {
                if (key.size() > 0) {
                    sparams.api_keys.push_back(key);
                }
            }
            key_file.close();
        }
        else if (arg == "--timeout" || arg == "-to")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.read_timeout  = std::stoi(argv[i]);
            sparams.write_timeout = std::stoi(argv[i]);
        }
        else if (arg == "-m" || arg == "--model")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.model = argv[i];
        }
        else if (arg == "-a" || arg == "--alias")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.model_alias = argv[i];
        }
        else if (arg == "-h" || arg == "--help")
        {
            server_print_usage(argv[0], default_params, default_sparams);
            exit(0);
        }
        else if (arg == "-c" || arg == "--ctx-size" || arg == "--ctx_size")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_ctx = std::stoi(argv[i]);
        }
        else if (arg == "--rope-scaling")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            std::string value(argv[i]);
            /**/ if (value == "none")   { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
            else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
            else if (value == "yarn")   { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
            else { invalid_param = true; break; }
        }
        else if (arg == "--rope-freq-base")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.rope_freq_base = std::stof(argv[i]);
        }
        else if (arg == "--rope-freq-scale")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.rope_freq_scale = std::stof(argv[i]);
        }
        else if (arg == "--yarn-ext-factor")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.yarn_ext_factor = std::stof(argv[i]);
        }
        else if (arg == "--yarn-attn-factor")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.yarn_attn_factor = std::stof(argv[i]);
        }
        else if (arg == "--yarn-beta-fast")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.yarn_beta_fast = std::stof(argv[i]);
        }
        else if (arg == "--yarn-beta-slow")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.yarn_beta_slow = std::stof(argv[i]);
        }
        else if (arg == "--pooling")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            std::string value(argv[i]);
            /**/ if (value == "none") { params.pooling_type = LLAMA_POOLING_TYPE_NONE; }
            else if (value == "mean") { params.pooling_type = LLAMA_POOLING_TYPE_MEAN; }
            else if (value == "cls")  { params.pooling_type = LLAMA_POOLING_TYPE_CLS; }
            else { invalid_param = true; break; }
        }
        else if (arg == "--threads" || arg == "-t")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_threads = std::stoi(argv[i]);
        }
        else if (arg == "--grp-attn-n" || arg == "-gan")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.grp_attn_n = std::stoi(argv[i]);
        }
        else if (arg == "--grp-attn-w" || arg == "-gaw")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.grp_attn_w = std::stoi(argv[i]);
        }
        else if (arg == "--threads-batch" || arg == "-tb")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_threads_batch = std::stoi(argv[i]);
        }
        else if (arg == "--threads-http")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            sparams.n_threads_http = std::stoi(argv[i]);
        }
        else if (arg == "-b" || arg == "--batch-size")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_batch = std::stoi(argv[i]);
        }
        else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            if (llama_supports_gpu_offload()) {
                params.n_gpu_layers = std::stoi(argv[i]);
            } else {
                LOG_WARNING("Not compiled with GPU offload support, --n-gpu-layers option will be ignored. "
                            "See main README.md for information on enabling GPU BLAS support",
                            {{"n_gpu_layers", params.n_gpu_layers}});
            }
        }
        else if (arg == "--split-mode" || arg == "-sm")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            std::string arg_next = argv[i];
            if (arg_next == "none")
            {
                params.split_mode = LLAMA_SPLIT_MODE_NONE;
            }
            else if (arg_next == "layer")
            {
                params.split_mode = LLAMA_SPLIT_MODE_LAYER;
            }
            else if (arg_next == "row")
            {
                params.split_mode = LLAMA_SPLIT_MODE_ROW;
            }
            else {
                invalid_param = true;
                break;
            }
#ifndef GGML_USE_CUDA
            fprintf(stderr, "warning: llama.cpp was compiled without CUDA. Setting the split mode has no effect.\n");
#endif // GGML_USE_CUDA
        }
        else if (arg == "--tensor-split" || arg == "-ts")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
            std::string arg_next = argv[i];

            // split string by , and /
            const std::regex regex{R"([,/]+)"};
            std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
            std::vector<std::string> split_arg{it, {}};
            GGML_ASSERT(split_arg.size() <= llama_max_devices());

            for (size_t i_device = 0; i_device < llama_max_devices(); ++i_device)
            {
                if (i_device < split_arg.size())
                {
                    params.tensor_split[i_device] = std::stof(split_arg[i_device]);
                }
                else
                {
                    params.tensor_split[i_device] = 0.0f;
                }
            }
#else
            LOG_WARNING("llama.cpp was compiled without CUDA. It is not possible to set a tensor split.\n", {});
#endif // GGML_USE_CUDA
        }
        else if (arg == "--main-gpu" || arg == "-mg")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
            params.main_gpu = std::stoi(argv[i]);
#else
            LOG_WARNING("llama.cpp was compiled without CUDA. It is not possible to set a main GPU.", {});
#endif
        }
        else if (arg == "--lora")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_adapter.emplace_back(argv[i], 1.0f);
            params.use_mmap = false;
        }
        else if (arg == "--lora-scaled")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            const char * lora_adapter = argv[i];
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_adapter.emplace_back(lora_adapter, std::stof(argv[i]));
            params.use_mmap = false;
        }
        else if (arg == "--lora-base")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.lora_base = argv[i];
        }
        else if (arg == "-v" || arg == "--verbose")
        {
            server_verbose = true;
        }
        else if (arg == "--mlock")
        {
            params.use_mlock = true;
        }
        else if (arg == "--no-mmap")
        {
            params.use_mmap = false;
        }
        else if (arg == "--numa")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            } else {
                std::string value(argv[i]);
                /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
                else if (value == "isolate")                   { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
                else if (value == "numactl")                   { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
                else { invalid_param = true; break; }
            }
        }
        else if (arg == "--embedding")
        {
            params.embedding = true;
        }
        else if (arg == "-cb" || arg == "--cont-batching")
        {
            params.cont_batching = true;
        }
        else if (arg == "-fa" || arg == "--flash-attn")
        {
            params.flash_attn = true;
        }
        else if (arg == "-np" || arg == "--parallel")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_parallel = std::stoi(argv[i]);
        }
        else if (arg == "-n" || arg == "--n-predict")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.n_predict = std::stoi(argv[i]);
        }
        else if (arg == "-ctk" || arg == "--cache-type-k") {
            // guard against a missing value (previously indexed argv[++i] unchecked)
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.cache_type_k = argv[i];
        }
        else if (arg == "-ctv" || arg == "--cache-type-v") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.cache_type_v = argv[i];
        }
        else if (arg == "--mmproj")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            params.mmproj = argv[i];
        }
        else if (arg == "--log-format")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            if (std::strcmp(argv[i], "json") == 0)
            {
                server_log_json = true;
            }
            else if (std::strcmp(argv[i], "text") == 0)
            {
                server_log_json = false;
            }
            else
            {
                invalid_param = true;
                break;
            }
        }
        else if (arg == "--log-disable")
        {
            log_set_target(stdout);
            LOG_DEBUG("logging to file is disabled.", {});
        }
        else if (arg == "--slots-endpoint-disable")
        {
            sparams.slots_endpoint = false;
        }
        else if (arg == "--metrics")
        {
            sparams.metrics_endpoint = true;
        }
        else if (arg == "--chat-template")
        {
            if (++i >= argc)
            {
                invalid_param = true;
                break;
            }
            if (!verify_custom_template(argv[i])) {
                fprintf(stderr, "error: the supplied chat template is not supported: %s\n", argv[i]);
                fprintf(stderr, "note: llama.cpp does not use jinja parser, we only support commonly used templates\n");
                invalid_param = true;
                break;
            }
        }
        else if (arg == "--override-kv")
        {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            char * sep = strchr(argv[i], '=');
            if (sep == nullptr || sep - argv[i] >= 128) {
                fprintf(stderr, "error: Malformed KV override: %s\n", argv[i]);
                invalid_param = true;
                break;
            }
            struct llama_model_kv_override kvo;
            std::strncpy(kvo.key, argv[i], sep - argv[i]);
            kvo.key[sep - argv[i]] = 0;
            sep++;
            if (strncmp(sep, "int:", 4) == 0) {
                sep += 4;
                kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
                kvo.val_i64 = std::atol(sep);
            } else if (strncmp(sep, "float:", 6) == 0) {
                sep += 6;
                kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
                kvo.val_f64 = std::atof(sep);
            } else if (strncmp(sep, "bool:", 5) == 0) {
                sep += 5;
                kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
                if (std::strcmp(sep, "true") == 0) {
                    kvo.val_bool = true;
                } else if (std::strcmp(sep, "false") == 0) {
                    kvo.val_bool = false;
                } else {
                    fprintf(stderr, "error: Invalid boolean value for KV override: %s\n", argv[i]);
                    invalid_param = true;
                    break;
                }
            } else {
                fprintf(stderr, "error: Invalid type for KV override: %s\n", argv[i]);
                invalid_param = true;
                break;
            }
            params.kv_overrides.push_back(kvo);
        }
        else
        {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            server_print_usage(argv[0], default_params, default_sparams);
            exit(1);
        }
    }

    if (!params.kv_overrides.empty()) {
        params.kv_overrides.emplace_back();
        params.kv_overrides.back().key[0] = 0;
    }

    if (invalid_param)
    {
        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
        server_print_usage(argv[0], default_params, default_sparams);
        exit(1);
    }
}
/* llama.cpp completion api semantics */
static json format_partial_response(
    llama_server_context &llama, server_slot *slot, const std::string &content, const std::vector<completion_token_output> &probs
) {
    json res = json
    {
        {"content",    content },
        {"stop",       false},
        {"slot_id",    slot->id },
        {"multimodal", llama.multimodal }
    };

    if (slot->sparams.n_probs > 0)
    {
        res["completion_probabilities"] = probs_vector_to_json(llama.ctx, probs);
    }

    return res;
}

static json format_tokenizer_response(const std::vector<llama_token> &tokens)
{
    return json {
        {"tokens", tokens}
    };
}

static json format_detokenized_response(std::string content)
{
    return json {
        {"content", content}
    };
}
static void log_server_request(const httplib::Request &req, const httplib::Response &res)
{
    // skip GH copilot requests when using default port
    if (req.path == "/health" || req.path == "/v1/health" || req.path == "/v1/completions")
    {
        return;
    }

    LOG_DEBUG("request", {
        {"remote_addr", req.remote_addr},
        {"remote_port", req.remote_port},
        {"status",      res.status},
        {"method",      req.method},
        {"path",        req.path},
        {"params",      req.params},
    });

    LOG_VERBOSE("request", {
        {"request",  req.body},
        {"response", res.body},
    });
}
static void append_to_generated_text_from_generated_token_probs(llama_server_context &llama, server_slot *slot)
{
    auto & gtps = slot->generated_token_probs;
    auto translator = token_translator{llama.ctx};
    auto add_strlen = [=](size_t sum, const completion_token_output & cto) { return sum + translator(cto).size(); };
    const size_t len = std::accumulate(gtps.begin(), gtps.end(), size_t(0), add_strlen);
    if (slot->generated_text.capacity() < slot->generated_text.size() + len)
    {
        slot->generated_text.reserve(slot->generated_text.size() + len);
    }
    for (const completion_token_output & cto : gtps)
    {
        slot->generated_text += translator(cto);
    }
}
std::function<void(int)> shutdown_handler;
std::atomic_flag is_terminating = ATOMIC_FLAG_INIT;

inline void signal_handler(int signal) {
    if (is_terminating.test_and_set()) {
        // in case it hangs, we can force terminate the server by hitting Ctrl+C twice
        // this is for better developer experience, we can remove when the server is stable enough
        fprintf(stderr, "Received second interrupt, terminating immediately.\n");
        exit(1);
    }
    shutdown_handler(signal);
}

static bool update_load_progress(float progress, void *data)
{
    ((llama_server_context*)data)->modelProgress = progress;
    return true;
}
#if defined(_WIN32)
char* wchar_to_char(const wchar_t* wstr) {
    if (wstr == nullptr) return nullptr;

    // Determine the number of bytes needed for the UTF-8 string
    int bytes = WideCharToMultiByte(CP_UTF8, 0, wstr, -1, nullptr, 0, nullptr, nullptr);
    char* str = new char[bytes];

    // Convert the wide-character string to a UTF-8 string
    WideCharToMultiByte(CP_UTF8, 0, wstr, -1, str, bytes, nullptr, nullptr);
    return str;
}

int wmain(int argc, wchar_t **wargv) {
    char** argv = new char*[argc];
    for (int i = 0; i < argc; ++i) {
        argv[i] = wchar_to_char(wargv[i]);
    }

    // Adjust error mode to avoid error dialog after we start.
    SetErrorMode(SEM_FAILCRITICALERRORS);
#else
int main(int argc, char **argv) {
#endif
#if SERVER_VERBOSE != 1
    log_disable();
#endif
    // own arguments required by this example
    gpt_params params;
    server_params sparams;

    // struct that contains llama context and inference
    llama_server_context llama;

    server_params_parse(argc, argv, sparams, params);

    if (params.model_alias == "unknown")
    {
        params.model_alias = params.model;
    }

    llama_backend_init();
    llama_numa_init(params.numa);

    LOG_INFO("build info", {{"build", LLAMA_BUILD_NUMBER},
                            {"commit", LLAMA_COMMIT}});

    LOG_INFO("system info", {
        {"n_threads",       params.n_threads},
        {"n_threads_batch", params.n_threads_batch},
        {"total_threads",   std::thread::hardware_concurrency()},
        {"system_info",     llama_print_system_info()},
    });

    httplib::Server svr;

    std::atomic<server_state> state{SERVER_STATE_LOADING_MODEL};

    svr.set_default_headers({{"Server", "llama.cpp"}});

    // CORS preflight
    svr.Options(R"(.*)", [](const httplib::Request &req, httplib::Response &res) {
        res.set_header("Access-Control-Allow-Origin",      req.get_header_value("Origin"));
        res.set_header("Access-Control-Allow-Credentials", "true");
        res.set_header("Access-Control-Allow-Methods",     "POST");
        res.set_header("Access-Control-Allow-Headers",     "*");
    });

    svr.Get("/health", [&](const httplib::Request& req, httplib::Response& res) {
        server_state current_state = state.load();
        switch (current_state) {
            case SERVER_STATE_READY: {
                // request slots data using task queue
                task_server task;
                task.id        = llama.queue_tasks.get_new_id();
                task.type      = TASK_TYPE_METRICS;
                task.target_id = -1;

                llama.queue_results.add_waiting_task_id(task.id);
                llama.queue_tasks.post(task);

                // get the result
                task_result result = llama.queue_results.recv(task.id);
                llama.queue_results.remove_waiting_task_id(task.id);

                int n_idle_slots       = result.result_json["idle"];
                int n_processing_slots = result.result_json["processing"];

                json health = {
                    {"status",           "ok"},
                    {"slots_idle",       n_idle_slots},
                    {"slots_processing", n_processing_slots}};
                res.status = 200; // HTTP OK
                if (sparams.slots_endpoint && req.has_param("include_slots")) {
                    health["slots"] = result.result_json["slots"];
                }

                if (n_idle_slots == 0) {
                    health["status"] = "no slot available";
                    if (req.has_param("fail_on_no_slot")) {
                        res.status = 503; // HTTP Service Unavailable
                    }
                }
                res.set_content(health.dump(), "application/json");
                break;
            }
            case SERVER_STATE_LOADING_MODEL: {
                char buf[128];
                snprintf(&buf[0], 128, R"({"status": "loading model", "progress": %0.2f})", llama.modelProgress);
                res.set_content(buf, "application/json");
                res.status = 503; // HTTP Service Unavailable
                break;
            }
            case SERVER_STATE_ERROR:
                res.set_content(R"({"status": "error", "error": "Model failed to load"})", "application/json");
                res.status = 500; // HTTP Internal Server Error
                break;
        }
    });
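
    // Typical /health payloads (values illustrative, formatting loosened):
    //   200 {"status": "ok", "slots_idle": 4, "slots_processing": 0}
    //   503 {"status": "loading model", "progress": 0.42}
    //   503 {"status": "no slot available", ...}   (only with ?fail_on_no_slot)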
    if (sparams.slots_endpoint) {
        svr.Get("/slots", [&](const httplib::Request&, httplib::Response& res) {
            // request slots data using task queue
            task_server task;
            task.id        = llama.queue_tasks.get_new_id();
            task.type      = TASK_TYPE_METRICS;
            task.target_id = -1;

            llama.queue_results.add_waiting_task_id(task.id);
            llama.queue_tasks.post(task);

            // get the result
            task_result result = llama.queue_results.recv(task.id);
            llama.queue_results.remove_waiting_task_id(task.id);

            res.set_content(result.result_json["slots"].dump(), "application/json");
            res.status = 200; // HTTP OK
        });
    }
    if (sparams.metrics_endpoint) {
        svr.Get("/metrics", [&](const httplib::Request&, httplib::Response& res) {
            // request slots data using task queue
            task_server task;
            task.id        = llama.queue_tasks.get_new_id();
            task.type      = TASK_TYPE_METRICS;
            task.target_id = -1;

            llama.queue_results.add_waiting_task_id(task.id);
            llama.queue_tasks.post(task);

            // get the result
            task_result result = llama.queue_results.recv(task.id);
            llama.queue_results.remove_waiting_task_id(task.id);

            json data = result.result_json;

            uint64_t n_prompt_tokens_processed = data["n_prompt_tokens_processed"];
            uint64_t t_prompt_processing       = data["t_prompt_processing"];

            uint64_t n_tokens_predicted  = data["n_tokens_predicted"];
            uint64_t t_tokens_generation = data["t_tokens_generation"];

            int32_t kv_cache_used_cells = data["kv_cache_used_cells"];

            // metrics definition: https://prometheus.io/docs/practices/naming/#metric-names
            json all_metrics_def = json {
                {"counter", {{
                        {"name",  "prompt_tokens_total"},
                        {"help",  "Number of prompt tokens processed."},
                        {"value", data["n_prompt_tokens_processed_total"]}
                }, {
                        {"name",  "tokens_predicted_total"},
                        {"help",  "Number of generation tokens processed."},
                        {"value", data["n_tokens_predicted_total"]}
                }}},
                {"gauge", {{
                        {"name",  "prompt_tokens_seconds"},
                        {"help",  "Average prompt throughput in tokens/s."},
                        {"value", n_prompt_tokens_processed ? 1e3 / t_prompt_processing * n_prompt_tokens_processed : 0}
                },{
                        {"name",  "predicted_tokens_seconds"},
                        {"help",  "Average generation throughput in tokens/s."},
                        {"value", n_tokens_predicted ? 1e3 / t_tokens_generation * n_tokens_predicted : 0}
                },{
                        {"name",  "kv_cache_usage_ratio"},
                        {"help",  "KV-cache usage. 1 means 100 percent usage."},
                        {"value", 1. * kv_cache_used_cells / params.n_ctx}
                },{
                        {"name",  "kv_cache_tokens"},
                        {"help",  "KV-cache tokens."},
                        {"value", data["kv_cache_tokens_count"]}
                },{
                        {"name",  "requests_processing"},
                        {"help",  "Number of requests processing."},
                        {"value", data["processing"]}
                },{
                        {"name",  "requests_deferred"},
                        {"help",  "Number of requests deferred."},
                        {"value", data["deferred"]}
                }}}
            };
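
            // The loop below renders each definition in the Prometheus text
            // exposition format, e.g. (value illustrative):
            //   # HELP llamacpp:prompt_tokens_total Number of prompt tokens processed.
            //   # TYPE llamacpp:prompt_tokens_total counter
            //   llamacpp:prompt_tokens_total 1024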
            std::stringstream prometheus;
            for (const auto& el : all_metrics_def.items()) {
                const auto& type        = el.key();
                const auto& metrics_def = el.value();
                for (const auto& metric_def : metrics_def) {
                    std::string name = metric_def["name"];
                    std::string help = metric_def["help"];
                    auto value = json_value(metric_def, "value", 0);
                    prometheus << "# HELP llamacpp:" << name << " " << help << "\n"
                               << "# TYPE llamacpp:" << name << " " << type << "\n"
                               << "llamacpp:" << name << " " << value << "\n";
                }
            }
            res.set_content(prometheus.str(), "text/plain; version=0.0.4");
            res.status = 200; // HTTP OK
        });
    }
    svr.set_logger(log_server_request);

    svr.set_exception_handler([](const httplib::Request &, httplib::Response &res, std::exception_ptr ep)
            {
                const char fmt[] = "500 Internal Server Error\n%s";
                char buf[BUFSIZ];
                try
                {
                    std::rethrow_exception(std::move(ep));
                }
                catch (std::exception &e)
                {
                    snprintf(buf, sizeof(buf), fmt, e.what());
                }
                catch (...)
                {
                    snprintf(buf, sizeof(buf), fmt, "Unknown Exception");
                }
                res.set_content(buf, "text/plain; charset=utf-8");
                res.status = 500;
            });

    svr.set_error_handler([](const httplib::Request &, httplib::Response &res)
            {
                if (res.status == 401)
                {
                    res.set_content("Unauthorized", "text/plain; charset=utf-8");
                }
                else if (res.status == 400)
                {
                    res.set_content("Invalid request", "text/plain; charset=utf-8");
                }
                else if (res.status == 404)
                {
                    res.set_content("File Not Found", "text/plain; charset=utf-8");
                }
            });
    // set timeouts and change hostname and port
    svr.set_read_timeout (sparams.read_timeout);
    svr.set_write_timeout(sparams.write_timeout);

    if (!svr.bind_to_port(sparams.hostname, sparams.port))
    {
        fprintf(stderr, "\ncouldn't bind to server socket: hostname=%s port=%d\n\n", sparams.hostname.c_str(), sparams.port);
        return 1;
    }

    // Set the base directory for serving static files
    svr.set_base_dir(sparams.public_path);

    std::unordered_map<std::string, std::string> log_data;

    log_data["hostname"] = sparams.hostname;
    log_data["port"]     = std::to_string(sparams.port);

    if (sparams.api_keys.size() == 1) {
        log_data["api_key"] = "api_key: ****" + sparams.api_keys[0].substr(sparams.api_keys[0].length() - 4);
    } else if (sparams.api_keys.size() > 1) {
        log_data["api_key"] = "api_key: " + std::to_string(sparams.api_keys.size()) + " keys loaded";
    }

    if (sparams.n_threads_http < 1) {
        // +2 threads for monitoring endpoints
        sparams.n_threads_http = std::max(params.n_parallel + 2, (int32_t) std::thread::hardware_concurrency() - 1);
    }
    log_data["n_threads_http"] = std::to_string(sparams.n_threads_http);
    svr.new_task_queue = [&sparams] { return new httplib::ThreadPool(sparams.n_threads_http); };

    LOG_INFO("HTTP server listening", log_data);

    // run the HTTP server in a thread - see comment below
    std::thread t([&]()
            {
                if (!svr.listen_after_bind())
                {
                    state.store(SERVER_STATE_ERROR);
                    return 1;
                }

                return 0;
            });

    // load the model
    params.progress_callback = update_load_progress;
    params.progress_callback_user_data = (void*)&llama;

    if (!llama.load_model(params))
    {
        state.store(SERVER_STATE_ERROR);
        return 1;
    } else {
        llama.initialize();
        state.store(SERVER_STATE_READY);
        LOG_INFO("model loaded", {});
    }
    const auto model_meta = llama.model_meta();

    // Middleware for API key validation
    auto validate_api_key = [&sparams](const httplib::Request &req, httplib::Response &res) -> bool {
        // If API key is not set, skip validation
        if (sparams.api_keys.empty()) {
            return true;
        }

        // Check for API key in the header
        auto auth_header = req.get_header_value("Authorization");

        std::string prefix = "Bearer ";
        if (auth_header.substr(0, prefix.size()) == prefix) {
            std::string received_api_key = auth_header.substr(prefix.size());
            if (std::find(sparams.api_keys.begin(), sparams.api_keys.end(), received_api_key) != sparams.api_keys.end()) {
                return true; // API key is valid
            }
        }

        // API key is invalid or not provided
        res.set_content("Unauthorized: Invalid API Key", "text/plain; charset=utf-8");
        res.status = 401; // Unauthorized

        LOG_WARNING("Unauthorized: Invalid API Key", {});

        return false;
    };
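
    // Example request against a protected endpoint (host, port and key are
    // illustrative):
    //   curl http://localhost:8080/completion \
    //        -H "Authorization: Bearer my-key" \
    //        -d '{"prompt": "Hello"}'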
    // this is only called if no index.html is found in the public --path
    svr.Get("/", [](const httplib::Request &, httplib::Response &res)
            {
                res.set_content("server running", "text/plain; charset=utf-8");
                res.status = 200; // HTTP OK
                return true;
            });
  2720. svr.Post("/completion", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res)
  2721. {
  2722. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2723. if (!validate_api_key(req, res)) {
  2724. return;
  2725. }
  2726. json data = json::parse(req.body);
  2727. const int task_id = llama.queue_tasks.get_new_id();
  2728. llama.queue_results.add_waiting_task_id(task_id);
  2729. llama.request_completion(task_id, data, false, -1);
  2730. if (!json_value(data, "stream", false)) {
  2731. std::string completion_text;
  2732. task_result result = llama.queue_results.recv(task_id);
  2733. if (!result.error && result.stop) {
  2734. res.set_content(result.result_json.dump(-1, ' ', false, json::error_handler_t::replace), "application/json; charset=utf-8");
  2735. }
  2736. else
  2737. {
  2738. res.status = 404;
  2739. res.set_content(result.result_json["content"], "text/plain; charset=utf-8");
  2740. }
  2741. llama.queue_results.remove_waiting_task_id(task_id);
  2742. } else {
  2743. const auto chunked_content_provider = [task_id, &llama](size_t, httplib::DataSink & sink)
  2744. {
  2745. while (true)
  2746. {
  2747. task_result result = llama.queue_results.recv(task_id);
  2748. if (!result.error) {
  2749. const std::string str =
  2750. "data: " +
  2751. result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
  2752. "\n\n";
  2753. LOG_VERBOSE("data stream", {
  2754. { "to_send", str }
  2755. });
  2756. if (!sink.write(str.c_str(), str.size()))
  2757. {
  2758. llama.queue_results.remove_waiting_task_id(task_id);
  2759. return false;
  2760. }
  2761. if (result.stop) {
  2762. break;
  2763. }
  2764. } else {
  2765. const std::string str =
  2766. "error: " +
  2767. result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
  2768. "\n\n";
  2769. LOG_VERBOSE("data stream", {
  2770. { "to_send", str }
  2771. });
  2772. if (!sink.write(str.c_str(), str.size()))
  2773. {
  2774. llama.queue_results.remove_waiting_task_id(task_id);
  2775. return false;
  2776. }
  2777. break;
  2778. }
  2779. }
  2780. llama.queue_results.remove_waiting_task_id(task_id);
  2781. sink.done();
  2782. return true;
  2783. };
  2784. auto on_complete = [task_id, &llama] (bool)
  2785. {
  2786. // cancel
  2787. llama.request_cancel(task_id);
  2788. llama.queue_results.remove_waiting_task_id(task_id);
  2789. };
  2790. res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
  2791. }
  2792. });
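    // POST /tokenize: convert {"content": "..."} into an array of token ids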
  2793. svr.Post("/tokenize", [&llama](const httplib::Request &req, httplib::Response &res)
  2794. {
  2795. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2796. const json body = json::parse(req.body);
  2797. std::vector<llama_token> tokens;
  2798. if (body.count("content") != 0)
  2799. {
  2800. tokens = llama.tokenize(body["content"], false);
  2801. }
  2802. const json data = format_tokenizer_response(tokens);
  2803. return res.set_content(data.dump(), "application/json; charset=utf-8");
  2804. });
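    // POST /detokenize: convert {"tokens": [...]} back into a string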
  2805. svr.Post("/detokenize", [&llama](const httplib::Request &req, httplib::Response &res)
  2806. {
  2807. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2808. const json body = json::parse(req.body);
  2809. std::string content;
  2810. if (body.count("tokens") != 0)
  2811. {
  2812. const std::vector<llama_token> tokens = body["tokens"];
  2813. content = tokens_to_str(llama.ctx, tokens.cbegin(), tokens.cend());
  2814. }
  2815. const json data = format_detokenized_response(content);
  2816. return res.set_content(data.dump(), "application/json; charset=utf-8");
  2817. });
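    // POST /embedding: return embedding vector(s) for {"content": ...};
    // the content may be a single prompt or an array of prompts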
  2818. svr.Post("/embedding", [&llama](const httplib::Request &req, httplib::Response &res)
  2819. {
  2820. res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
  2821. const json body = json::parse(req.body);
  2822. json prompt;
  2823. if (body.count("content") != 0)
  2824. {
  2825. prompt = body["content"];
  2826. }
  2827. else
  2828. {
  2829. prompt = "";
  2830. }
  2831. if (prompt.size() == 1) {
  2832. prompt = prompt[0];
  2833. }
  2834. // create and queue the task
  2835. json responses;
  2836. {
  2837. const int id_task = llama.queue_tasks.get_new_id();
  2838. llama.queue_results.add_waiting_task_id(id_task);
  2839. llama.request_completion(id_task, {{"prompt", prompt}}, true, -1);
  2840. // get the result
  2841. task_result result = llama.queue_results.recv(id_task);
  2842. llama.queue_results.remove_waiting_task_id(id_task);
  2843. if (result.error) {
  2844. return res.set_content(result.result_json.dump(), "application/json; charset=utf-8");
  2845. }
  2846. responses = result.result_json.value("results", std::vector<json>{result.result_json});
  2847. json embeddings = json::array();
  2848. for (auto & elem : responses) {
  2849. embeddings.push_back(elem.at("embedding"));
  2850. }
  2851. // send the result
  2852. json embedding_res = json{{"embedding", embeddings}};
  2853. return res.set_content(embedding_res.dump(), "application/json; charset=utf-8");
  2854. }
  2855. });
    // GG: if I put the main loop inside a thread, it crashes on the first request when built in Debug!?
    // "Bus error: 10" - this is on macOS, it does not crash on Linux
    //std::thread t2([&]()
    /*{
        bool running = true;
        while (running)
        {
            running = llama.update_slots();
        }
    }*/
    //);
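    // wire the task queue to the server context: incoming tasks, multitask
    // completion and slot updates are all dispatched through these callbacks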
    llama.queue_tasks.on_new_task(std::bind(
        &llama_server_context::process_single_task, &llama, std::placeholders::_1));
    llama.queue_tasks.on_finish_multitask(std::bind(
        &llama_server_context::on_finish_multitask, &llama, std::placeholders::_1));
    llama.queue_tasks.on_run_slots(std::bind(
        &llama_server_context::update_slots, &llama));
    llama.queue_results.on_multitask_update(std::bind(
        &llama_server_queue::update_multitask,
        &llama.queue_tasks,
        std::placeholders::_1,
        std::placeholders::_2,
        std::placeholders::_3
    ));
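    // on SIGINT (or CTRL+C on Windows) terminate the task queue so that
    // start_loop() below returns and the process can exit cleanly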
    shutdown_handler = [&](int) {
        llama.queue_tasks.terminate();
    };

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
    struct sigaction sigint_action;
    sigint_action.sa_handler = signal_handler;
    sigemptyset (&sigint_action.sa_mask);
    sigint_action.sa_flags = 0;
    sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
    auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
        return (ctrl_type == CTRL_C_EVENT) ? (signal_handler(SIGINT), true) : false;
    };
    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);

    // release the heap-allocated argv copies created during startup
    for (int i = 0; i < argc; ++i) {
        delete[] argv[i];
    }
    delete[] argv;
#endif
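    // run the main task loop on this thread (see the GG note above); this call
    // blocks until shutdown_handler triggers queue_tasks.terminate()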
    llama.queue_tasks.start_loop();
    svr.stop();
    t.join();

    llama_backend_free();

    return 0;
}