/**
 * llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
 *
 * MIT License
 *
 * Copyright (c) 2023 Georgi Gerganov
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

// Defines fileno on msys:
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#include <cstddef>
#include <cstdint>
#include <cstdio>
#endif

#include "llama-util.h"
#include "llama.h"

#include "ggml.h"
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#elif defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#endif

#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif
#ifdef GGML_USE_MPI
#include "ggml-mpi.h"
#endif
#ifdef GGML_USE_K_QUANTS
#ifndef QK_K
#ifdef GGML_QKK_64
#define QK_K 64
#else
#define QK_K 256
#endif
#endif
#endif

#include <array>
#include <ctime>
#include <cinttypes>
#include <fstream>
#include <random>
#include <map>
#include <unordered_map>
#include <queue>
#include <cassert>
#include <cstring>
#include <climits>
#include <memory>
#include <algorithm>
#include <initializer_list>
#include <thread>
#include <atomic>
#include <mutex>
#include <sstream>
#include <numeric>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

static void llama_log_internal(llama_log_level level, const char * format, ...);
static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data);

#define LLAMA_LOG_INFO(...)  llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__)
#define LLAMA_LOG_WARN(...)  llama_log_internal(LLAMA_LOG_LEVEL_WARN , __VA_ARGS__)
#define LLAMA_LOG_ERROR(...) llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__)

#if !defined(GGML_USE_CUBLAS) && !defined(GGML_USE_METAL)
#include "ggml-alloc.h"
#define LLAMA_USE_ALLOCATOR
#else
#define LLAMA_USE_SCRATCH
#define LLAMA_MAX_SCRATCH_BUFFERS 16
#endif

// available llama models
enum e_model {
    MODEL_UNKNOWN,
    MODEL_3B,
    MODEL_7B,
    MODEL_13B,
    MODEL_30B,
    MODEL_34B,
    MODEL_65B,
    MODEL_70B,
};

static const size_t kB = 1024;
static const size_t MB = 1024*1024;

// computed for n_ctx == 2048
// TODO: dynamically determine these sizes
//       needs modifications in ggml

typedef void (*offload_func_t)(struct ggml_tensor * tensor);

void llama_nop(struct ggml_tensor * tensor) { // don't offload by default
    (void) tensor;
}

//
// ggml helpers
//
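// plan the graph for n_threads, grow the caller-provided work buffer to the
// plan's required size, then run the computation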
static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph, int n_threads) {
    struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);

    if (plan.work_size > 0) {
        buf.resize(plan.work_size);
        plan.work_data = buf.data();
    }

    ggml_graph_compute(graph, &plan);
}

//
// memory sizes (calculated for n_batch == 512)
//
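// The tables below hold fixed, empirically chosen per-model scratch/eval sizes;
// MEM_REQ_SCRATCH0 additionally grows linearly with n_ctx. Entries marked
// "guess" were not measured.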
static const std::map<e_model, size_t> & MEM_REQ_SCRATCH0(int n_ctx)
{
    static std::map<e_model, size_t> k_sizes = {
        { MODEL_3B,   ((size_t) n_ctx / 16ull +  92ull) * MB },
        { MODEL_7B,   ((size_t) n_ctx / 16ull + 100ull) * MB },
        { MODEL_13B,  ((size_t) n_ctx / 12ull + 120ull) * MB },
        { MODEL_30B,  ((size_t) n_ctx /  9ull + 160ull) * MB },
        { MODEL_65B,  ((size_t) n_ctx /  6ull + 256ull) * MB }, // guess
        { MODEL_70B,  ((size_t) n_ctx /  7ull + 164ull) * MB },
    };
    return k_sizes;
}

static const std::map<e_model, size_t> & MEM_REQ_SCRATCH1()
{
    static std::map<e_model, size_t> k_sizes = {
        { MODEL_3B,  128ull * MB },
        { MODEL_7B,  160ull * MB },
        { MODEL_13B, 192ull * MB },
        { MODEL_30B, 256ull * MB },
        { MODEL_65B, 384ull * MB }, // guess
        { MODEL_70B, 304ull * MB },
    };
    return k_sizes;
}

// used to store the compute graph tensors + non-scratch data
static const std::map<e_model, size_t> & MEM_REQ_EVAL()
{
    static std::map<e_model, size_t> k_sizes = {
        { MODEL_3B,   8ull * MB },
        { MODEL_7B,  10ull * MB },
        { MODEL_13B, 12ull * MB },
        { MODEL_30B, 16ull * MB },
        { MODEL_65B, 24ull * MB }, // guess
        { MODEL_70B, 24ull * MB },
    };
    return k_sizes;
}

// amount of VRAM needed per batch size to hold temporary results
// the values for 3b are not derived from testing but instead chosen conservatively
static const std::map<e_model, size_t> & VRAM_REQ_SCRATCH_BASE()
{
    static std::map<e_model, size_t> k_sizes = {
        { MODEL_3B,   512ull * kB },
        { MODEL_7B,   512ull * kB },
        { MODEL_13B,  640ull * kB },
        { MODEL_30B,  768ull * kB },
        { MODEL_65B, 1280ull * kB },
        { MODEL_70B, 1280ull * kB },
    };
    return k_sizes;
}

// amount of VRAM needed per batch size and context to hold temporary results
// the values for 3b are not derived from testing but instead chosen conservatively
static const std::map<e_model, size_t> & VRAM_REQ_SCRATCH_PER_CONTEXT()
{
    static std::map<e_model, size_t> k_sizes = {
        { MODEL_3B,  128ull },
        { MODEL_7B,  128ull },
        { MODEL_13B, 160ull },
        { MODEL_30B, 208ull },
        { MODEL_65B, 256ull },
        { MODEL_70B, 256ull },
    };
    return k_sizes;
}

// default hparams (LLaMA 7B)
struct llama_hparams {
    uint32_t n_vocab   = 32000;
    uint32_t n_ctx     = 512;   // this is provided as user input?
    uint32_t n_embd    = 4096;
    uint32_t n_mult    = 256;
    uint32_t n_head    = 32;
    uint32_t n_head_kv = 32;
    uint32_t n_layer   = 32;
    uint32_t n_rot     = 64;

    // LLaMAv2
    // TODO: load from model data hparams
    float f_ffn_mult = 1.0f;
    float f_rms_norm_eps = LLAMA_DEFAULT_RMS_EPS;

    float rope_freq_base  = 10000.0f;
    float rope_freq_scale = 1.0f;

    enum llama_ftype ftype = LLAMA_FTYPE_MOSTLY_F16;

    bool operator!=(const llama_hparams & other) const {
        return static_cast<bool>(memcmp(this, &other, sizeof(llama_hparams))); // NOLINT
    }

    uint32_t n_gqa() const {
        return n_head/n_head_kv;
    }

    uint32_t n_embd_head() const {
        return n_embd/n_head;
    }

    uint32_t n_embd_gqa() const {
        return n_embd/n_gqa();
    }
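
    // total bytes for the KV cache: 2 tensors (K and V), each holding
    // n_embd_gqa x n_ctx elements per layer, stored as fp16.
    // e.g. with the 7B defaults above (4096 x 512 x 32 layers):
    //   2 * 4096 * 512 * 32 * 2 bytes = 256 MB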
    size_t kv_size() const {
        size_t result = 2ull;
        result *= (size_t) n_embd_gqa();
        result *= (size_t) n_ctx;
        result *= (size_t) n_layer;
        result *= sizeof(ggml_fp16_t);
        return result;
    }
};

struct llama_layer {
    // normalization
    struct ggml_tensor * attention_norm;

    // attention
    struct ggml_tensor * wq;
    struct ggml_tensor * wk;
    struct ggml_tensor * wv;
    struct ggml_tensor * wo;

    // normalization
    struct ggml_tensor * ffn_norm;

    // ff
    struct ggml_tensor * w1;
    struct ggml_tensor * w2;
    struct ggml_tensor * w3;
};

struct llama_kv_cache {
    struct ggml_tensor * k = NULL;
    struct ggml_tensor * v = NULL;

    struct ggml_context * ctx = NULL;

    llama_ctx_buffer buf;

    int n; // number of tokens currently in the cache

    ~llama_kv_cache() {
        if (ctx) {
            ggml_free(ctx);
        }

#ifdef GGML_USE_CUBLAS
        ggml_cuda_free_data(k);
        ggml_cuda_free_data(v);
#endif // GGML_USE_CUBLAS
    }
};

struct llama_vocab {
    using id    = int32_t;
    using token = std::string;

    struct token_score {
        token tok;
        float score;
    };

    std::unordered_map<token, id> token_to_id;
    std::vector<token_score> id_to_token;
};

struct llama_model {
    e_model type = MODEL_UNKNOWN;

    llama_hparams hparams;

    struct ggml_tensor * tok_embeddings;

    struct ggml_tensor * norm;
    struct ggml_tensor * output;

    std::vector<llama_layer> layers;
    int n_gpu_layers;

    // context
    struct ggml_context * ctx = NULL;

    // the model memory buffer
    llama_ctx_buffer buf;

    // model memory mapped file
    std::unique_ptr<llama_mmap> mapping;

    // objects representing data potentially being locked in memory
    llama_mlock mlock_buf;
    llama_mlock mlock_mmap;

    // for quantize-stats only
    std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;

    int64_t t_load_us = 0;
    int64_t t_start_us = 0;

    llama_vocab vocab;

    ~llama_model() {
        if (ctx) {
            ggml_free(ctx);
        }

#ifdef GGML_USE_CUBLAS
        for (size_t i = 0; i < tensors_by_name.size(); ++i) {
            ggml_cuda_free_data(tensors_by_name[i].second);
        }
        ggml_cuda_free_scratch();
#elif defined(GGML_USE_CLBLAST)
        for (size_t i = 0; i < tensors_by_name.size(); ++i) {
            ggml_cl_free_data(tensors_by_name[i].second);
        }
#endif
    }
};

struct llama_context {
    llama_context(const llama_model & model) : model(model), t_load_us(model.t_load_us), t_start_us(model.t_start_us) {}
    ~llama_context() {
        if (model_owner) {
            delete &model;
        }
#ifdef GGML_USE_METAL
        if (ctx_metal) {
            ggml_metal_free(ctx_metal);
        }
#endif
#ifdef LLAMA_USE_ALLOCATOR
        if (alloc) {
            ggml_allocr_free(alloc);
        }
#endif
    }

    std::mt19937 rng;

    bool has_evaluated_once = false;

    int64_t t_sample_us = 0;
    int64_t t_eval_us   = 0;
    int64_t t_p_eval_us = 0;

    int32_t n_sample = 0; // number of tokens sampled
    int32_t n_eval   = 0; // number of eval calls
    int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)

    const llama_model & model;

    bool model_owner = false;

    int64_t t_load_us;
    int64_t t_start_us;

    // key + value cache for the self attention
    struct llama_kv_cache kv_self;

    size_t mem_per_token = 0;

    // decode output (2-dimensional array: [n_tokens][n_vocab])
    std::vector<float> logits;
    bool logits_all = false;

    // input embedding (1-dimensional array: [n_embd])
    std::vector<float> embedding;

    // reusable buffer for `struct ggml_graph_plan.work_data`
    std::vector<uint8_t> work_buffer;

    // memory buffers used to evaluate the model
    // TODO: move in llama_state
    llama_ctx_buffer buf_compute;

#ifdef LLAMA_USE_ALLOCATOR
    llama_ctx_buffer buf_alloc;
    ggml_allocr * alloc = NULL;
#endif

#ifdef LLAMA_USE_SCRATCH
    llama_ctx_buffer buf_scratch[LLAMA_MAX_SCRATCH_BUFFERS];
    int    buf_last = 0;
    size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 };
#endif

#ifdef GGML_USE_METAL
    ggml_metal_context * ctx_metal = NULL;
#endif

#ifdef GGML_USE_MPI
    ggml_mpi_context * ctx_mpi = NULL;
#endif
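
    // select the active scratch buffer: i picks one of buf_scratch, i == -1
    // switches back to the context's own memory; the size returned by
    // ggml_set_scratch (bytes used in the previous scratch) is recorded to
    // track each buffer's high-water mark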
    void use_buf(struct ggml_context * ctx, int i) {
#if defined(LLAMA_USE_SCRATCH)
        size_t last_size = 0;

        if (i == -1) {
            last_size = ggml_set_scratch(ctx, { 0, 0, nullptr, });
        } else {
            auto & buf = buf_scratch[i];
            last_size = ggml_set_scratch(ctx, { 0, buf.size, buf.addr, });
        }

        if (buf_last >= 0) {
            buf_max_size[buf_last] = std::max(buf_max_size[buf_last], last_size);
        }

        buf_last = i;
#else
        (void) i;
        (void) ctx;
#endif
    }

    size_t get_buf_max_mem(int i) const {
#if defined(LLAMA_USE_SCRATCH)
        return buf_max_size[i];
#else
        (void) i;
        return 0;
#endif
    }
};

struct llama_state {
    // We save the log callback globally
    llama_log_callback log_callback = llama_log_callback_default;
    void * log_callback_user_data = nullptr;
};

// global state
static llama_state g_state;
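
// multiply with overflow detection: if a != 0 and the product does not divide
// back to b, the multiplication wrapped around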
template <typename T>
static T checked_mul(T a, T b) {
    T ret = a * b;
    if (a != 0 && ret / a != b) {
        throw std::runtime_error(format("overflow multiplying %llu * %llu",
                     (unsigned long long) a, (unsigned long long) b));
    }
    return ret;
}

static size_t checked_div(size_t a, size_t b) {
    if (b == 0 || a % b != 0) {
        throw std::runtime_error(format("error dividing %zu / %zu", a, b));
    }
    return a / b;
}

static std::string llama_format_tensor_shape(const std::vector<uint32_t> & ne) {
    char buf[256];
    snprintf(buf, sizeof(buf), "%5u", ne.at(0));
    for (size_t i = 1; i < ne.size(); i++) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i));
    }
    return buf;
}

static size_t llama_calc_tensor_size(const std::vector<uint32_t> & ne, enum ggml_type type) {
    size_t size = ggml_type_size(type);
    for (uint32_t dim : ne) {
        size = checked_mul<size_t>(size, dim);
    }
    return size / ggml_blck_size(type);
}

struct llama_load_tensor {
    std::string name;
    enum ggml_type type = GGML_TYPE_F32;
    std::vector<uint32_t> ne;
    size_t file_off;
    size_t size;
    struct ggml_tensor * ggml_tensor = NULL;
    uint8_t * data;
};

struct llama_load_tensors_map {
    // tensors is kept in a separate vector to preserve file order
    std::vector<llama_load_tensor> tensors;
    std::unordered_map<std::string, size_t> name_to_idx;
};

enum llama_file_version {
    LLAMA_FILE_VERSION_GGML,
    LLAMA_FILE_VERSION_GGMF_V1, // added version field and scores in vocab
    LLAMA_FILE_VERSION_GGJT_V1, // added padding
    LLAMA_FILE_VERSION_GGJT_V2, // changed quantization format
    LLAMA_FILE_VERSION_GGJT_V3, // changed Q4 and Q8 quantization format
};

struct llama_file_loader {
    llama_file file;
    llama_file_version file_version;
    llama_hparams hparams;
    llama_vocab vocab;

    llama_file_loader(const char * fname, llama_load_tensors_map & tensors_map)
        : file(fname, "rb") {
        LLAMA_LOG_INFO("llama.cpp: loading model from %s\n", fname);
        read_magic();
        read_hparams();
        read_vocab();
        read_tensor_metadata(tensors_map);
    }
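
    // the original 'ggml' container has no version field; for the newer 'ggmf'
    // and 'ggjt' containers a u32 version immediately follows the u32 magic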
    void read_magic() {
        uint32_t magic = file.read_u32();

        if (magic == LLAMA_FILE_MAGIC_GGML) {
            file_version = LLAMA_FILE_VERSION_GGML;
            return;
        }

        uint32_t version = file.read_u32();

        switch (magic) {
            case LLAMA_FILE_MAGIC_GGMF:
                switch (version) {
                    case 1: file_version = LLAMA_FILE_VERSION_GGMF_V1; return;
                }
                break;
            case LLAMA_FILE_MAGIC_GGJT:
                switch (version) {
                    case 1: file_version = LLAMA_FILE_VERSION_GGJT_V1; return;
                    case 2: file_version = LLAMA_FILE_VERSION_GGJT_V2; return;
                    case 3: file_version = LLAMA_FILE_VERSION_GGJT_V3; return;
                }
        }

        throw std::runtime_error(format("unknown (magic, version) combination: %08x, %08x; is this really a GGML file?",
                     magic, version));
    }

    void read_hparams() {
        hparams.n_vocab = file.read_u32();
        hparams.n_embd  = file.read_u32();
        hparams.n_mult  = file.read_u32();
        hparams.n_head  = file.read_u32();
        hparams.n_layer = file.read_u32();
        hparams.n_rot   = file.read_u32();
        hparams.ftype   = (enum llama_ftype) file.read_u32();

        // LLaMAv2
        // TODO: read from header
        hparams.n_head_kv = hparams.n_head;
    }

    void read_vocab() {
        vocab.id_to_token.resize(hparams.n_vocab);

        for (uint32_t i = 0; i < hparams.n_vocab; i++) {
            uint32_t len = file.read_u32();
            std::string word = file.read_string(len);

            float score = 0.0f;
            file.read_raw(&score, sizeof(score));

            vocab.token_to_id[word] = i;

            auto & tok_score = vocab.id_to_token[i];
            tok_score.tok = std::move(word);
            tok_score.score = score;
        }
    }

    void read_tensor_metadata(llama_load_tensors_map & tensors_map) {
        while (file.tell() < file.size) {
            llama_load_tensor tensor;
            uint32_t n_dims = file.read_u32();
            uint32_t name_len = file.read_u32();
            tensor.type = (enum ggml_type) file.read_u32();
            tensor.ne.resize(n_dims);
            file.read_raw(tensor.ne.data(), sizeof(tensor.ne[0]) * n_dims);
            std::string name = file.read_string(name_len);
            if (n_dims < 1 || n_dims > 2) {
                throw std::runtime_error(format("llama.cpp: tensor '%s' should not be %u-dimensional", name.c_str(), n_dims));
            }
            switch (tensor.type) {
                case GGML_TYPE_F32:
                case GGML_TYPE_F16:
                case GGML_TYPE_Q4_0:
                case GGML_TYPE_Q4_1:
                case GGML_TYPE_Q5_0:
                case GGML_TYPE_Q5_1:
                case GGML_TYPE_Q8_0:
                case GGML_TYPE_Q2_K:
                case GGML_TYPE_Q3_K:
                case GGML_TYPE_Q4_K:
                case GGML_TYPE_Q5_K:
                case GGML_TYPE_Q6_K:
                    break;
                default: {
                    throw std::runtime_error(format("unrecognized tensor type %u\n", tensor.type));
                }
            }

            // skip to the next multiple of 32 bytes
            if (file_version >= LLAMA_FILE_VERSION_GGJT_V1) {
                file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR);
            }

            tensor.file_off = file.tell();
            tensor.name = name;
            tensor.size = llama_calc_tensor_size(tensor.ne, tensor.type);
            file.seek(tensor.size, SEEK_CUR);

            tensors_map.tensors.push_back(tensor);
            tensors_map.name_to_idx[name] = tensors_map.tensors.size() - 1;
        }
    }
};

struct llama_file_saver {
    llama_file file;
    llama_file_loader * any_file_loader;
    llama_file_saver(const char * fname, llama_file_loader * any_file_loader, enum llama_ftype new_ftype)
        : file(fname, "wb"), any_file_loader(any_file_loader) {
        LLAMA_LOG_INFO("llama.cpp: saving model to %s\n", fname);
        write_magic();
        write_hparams(new_ftype);
        write_vocab();
    }
    void write_magic() {
        file.write_u32(LLAMA_FILE_MAGIC);   // magic
        file.write_u32(LLAMA_FILE_VERSION); // version
    }
    void write_hparams(enum llama_ftype new_ftype) {
        const llama_hparams & hparams = any_file_loader->hparams;
        file.write_u32(hparams.n_vocab);
        file.write_u32(hparams.n_embd);
        file.write_u32(hparams.n_mult);
        file.write_u32(hparams.n_head);
        file.write_u32(hparams.n_layer);
        file.write_u32(hparams.n_rot);
        file.write_u32(new_ftype);
    }
    void write_vocab() {
        if (any_file_loader->file_version == LLAMA_FILE_VERSION_GGML) {
            LLAMA_LOG_WARN("llama.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores\n");
        }
        uint32_t n_vocab = any_file_loader->hparams.n_vocab;
        for (uint32_t i = 0; i < n_vocab; i++) {
            const auto & token_score = any_file_loader->vocab.id_to_token.at(i);
            file.write_u32((uint32_t) token_score.tok.size());
            file.write_raw(token_score.tok.data(), token_score.tok.size());
            file.write_raw(&token_score.score, sizeof(token_score.score));
        }
    }
    void write_tensor(llama_load_tensor & tensor, enum ggml_type new_type, const void * new_data, size_t new_size) {
        switch (new_type) {
            case GGML_TYPE_F32:
            case GGML_TYPE_F16:
            case GGML_TYPE_Q4_0:
            case GGML_TYPE_Q4_1:
            case GGML_TYPE_Q5_0:
            case GGML_TYPE_Q5_1:
            case GGML_TYPE_Q8_0:
            case GGML_TYPE_Q2_K:
            case GGML_TYPE_Q3_K:
            case GGML_TYPE_Q4_K:
            case GGML_TYPE_Q5_K:
            case GGML_TYPE_Q6_K:
                break;
            default: LLAMA_ASSERT(false);
        }
        file.write_u32((uint32_t) tensor.ne.size());
        file.write_u32((uint32_t) tensor.name.size());
        file.write_u32(new_type);
        file.write_raw(tensor.ne.data(), sizeof(tensor.ne[0]) * tensor.ne.size());
        file.write_raw(tensor.name.data(), tensor.name.size());
        file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR);
        LLAMA_ASSERT(new_size == llama_calc_tensor_size(tensor.ne, new_type));
        file.write_raw(new_data, new_size);
    }
};

struct llama_model_loader {
    std::unique_ptr<llama_file_loader> file_loader;
    llama_load_tensors_map tensors_map;
    bool use_mmap;
    size_t num_ggml_tensors_created = 0;
    struct ggml_context * ggml_ctx = NULL;
    std::unique_ptr<llama_mmap> mapping;

    llama_model_loader(const std::string & fname_base, bool use_mmap) {
        file_loader = std::unique_ptr<llama_file_loader>(new llama_file_loader(fname_base.c_str(), tensors_map));
        if (!llama_mmap::SUPPORTED) {
            use_mmap = false;
        }
        this->use_mmap = use_mmap;
    }

    void calc_sizes(size_t * ctx_size_p, size_t * mmapped_size_p) const {
        *ctx_size_p = *mmapped_size_p = 0;
        for (const llama_load_tensor & lt : tensors_map.tensors) {
            *ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE;
            *(use_mmap ? mmapped_size_p : ctx_size_p) += lt.size + 16;
        }
    }

    struct ggml_tensor * get_tensor(const std::string & name, const std::vector<uint32_t> & ne, ggml_backend backend) {
        auto it = tensors_map.name_to_idx.find(name);
        if (it == tensors_map.name_to_idx.end()) {
            throw std::runtime_error(format("llama.cpp: tensor '%s' is missing from model", name.c_str()));
        }
        llama_load_tensor & lt = tensors_map.tensors.at(it->second);
        if (lt.ne != ne) {
            throw std::runtime_error(format("llama.cpp: tensor '%s' has wrong shape; expected %s, got %s",
                         name.c_str(), llama_format_tensor_shape(ne).c_str(), llama_format_tensor_shape(lt.ne).c_str()));
        }

        return get_tensor_for(lt, backend);
    }

    struct ggml_tensor * get_tensor_for(llama_load_tensor & lt, ggml_backend backend) {
        struct ggml_tensor * tensor;
        if (backend != GGML_BACKEND_CPU) {
            ggml_set_no_alloc(ggml_ctx, true);
        }
        if (lt.ne.size() == 2) {
            tensor = ggml_new_tensor_2d(ggml_ctx, lt.type, lt.ne.at(0), lt.ne.at(1));
        } else {
            LLAMA_ASSERT(lt.ne.size() == 1);
            tensor = ggml_new_tensor_1d(ggml_ctx, lt.type, lt.ne.at(0));
        }
        ggml_set_name(tensor, lt.name.c_str());
        LLAMA_ASSERT(lt.ggml_tensor == NULL); // if this fails, we called get_tensor twice on the same tensor
        if (backend != GGML_BACKEND_CPU) {
            ggml_set_no_alloc(ggml_ctx, use_mmap);
        }
        tensor->backend = backend;
        lt.ggml_tensor = tensor;
        num_ggml_tensors_created++;
        return tensor;
    }

    void done_getting_tensors() const {
        if (num_ggml_tensors_created != tensors_map.tensors.size()) {
            throw std::runtime_error(std::string("llama.cpp: file contained more tensors than expected"));
        }
    }
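
    // stream every tensor's data into place: with mmap the data pointer aims
    // into the mapping (optionally mlock'ed as it grows); otherwise the data
    // is read from the file (GPU tensors via a temporary buffer) and handed
    // off to the backend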
    void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) {
        size_t data_size = 0;
        size_t prefetch_size = file_loader->file.size;
        size_t lock_size = 0;
        for (const llama_load_tensor & lt : tensors_map.tensors) {
            data_size += lt.size;
            if (lt.ggml_tensor->backend != GGML_BACKEND_CPU) {
                prefetch_size -= lt.size;
            }
        }

        if (use_mmap) {
            mapping.reset(new llama_mmap(&file_loader->file, prefetch_size, ggml_is_numa()));
            if (lmlock) {
                lmlock->init(mapping->addr);
            }
        }

        size_t done_size = 0;
        for (llama_load_tensor & lt : tensors_map.tensors) {
            if (progress_callback) {
                progress_callback((float) done_size / data_size, progress_callback_user_data);
            }
            LLAMA_ASSERT(lt.ggml_tensor); // unused tensors should have been caught by load_data already
            lt.data = (uint8_t *) lt.ggml_tensor->data;

            // allocate temp buffer if not using mmap
            if (!use_mmap && lt.data == NULL) {
                GGML_ASSERT(lt.ggml_tensor->backend != GGML_BACKEND_CPU);
                lt.data = (uint8_t*)malloc(ggml_nbytes(lt.ggml_tensor));
            }

            load_data_for(lt);

            switch (lt.ggml_tensor->backend) {
                case GGML_BACKEND_CPU:
                    lt.ggml_tensor->data = lt.data;
                    if (use_mmap && lmlock) {
                        lock_size += lt.size;
                        lmlock->grow_to(lock_size);
                    }
                    break;
#if defined(GGML_USE_CUBLAS)
                case GGML_BACKEND_GPU:
                case GGML_BACKEND_GPU_SPLIT:
                    ggml_cuda_transform_tensor(lt.data, lt.ggml_tensor);
                    if (!use_mmap) {
                        free(lt.data);
                    }
                    break;
#elif defined(GGML_USE_CLBLAST)
                case GGML_BACKEND_GPU:
                    ggml_cl_transform_tensor(lt.data, lt.ggml_tensor);
                    if (!use_mmap) {
                        free(lt.data);
                    }
                    break;
#endif
                default:
                    continue;
            }

            done_size += lt.size;
        }
    }

    void load_data_for(llama_load_tensor & lt) {
        if (use_mmap) {
            lt.data = (uint8_t *) mapping->addr + lt.file_off;
        } else {
            llama_file & file = file_loader->file;
            file.seek(lt.file_off, SEEK_SET);
            file.read_raw(lt.data, lt.size);
        }

        if (0) {
            print_checksum(lt);
        }
    }

    static void print_checksum(llama_load_tensor & lt) {
        uint32_t sum = 0;
        for (size_t i = 0; i < lt.size; i++) {
            uint8_t byte = lt.data[i];
            sum = byte + (sum << 6) + (sum << 16) - sum; // sdbm hash
        }
        LLAMA_LOG_INFO("%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum,
                llama_format_tensor_shape(lt.ne).c_str(), lt.size);
    }
};

//
// kv cache
//

static bool kv_cache_init(
        const struct llama_hparams & hparams,
             struct llama_kv_cache & cache,
                         ggml_type   wtype,
                               int   n_ctx,
                               int   n_gpu_layers) {
    const int n_embd  = hparams.n_embd_gqa();
    const int n_layer = hparams.n_layer;

    const int64_t n_mem      = n_layer*n_ctx;
    const int64_t n_elements = n_embd*n_mem;
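
    // room for the K and V tensors, plus (presumably) ~2 MB of headroom for
    // the ggml context's own bookkeeping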
    cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);
    cache.n = 0;

    struct ggml_init_params params;
    params.mem_size   = cache.buf.size;
    params.mem_buffer = cache.buf.addr;
    params.no_alloc   = false;

    cache.ctx = ggml_init(params);

    if (!cache.ctx) {
        LLAMA_LOG_ERROR("%s: failed to allocate memory for kv cache\n", __func__);
        return false;
    }

    cache.k = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
    cache.v = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
    ggml_set_name(cache.k, "cache_k");
    ggml_set_name(cache.v, "cache_v");

    (void) n_gpu_layers;
#ifdef GGML_USE_CUBLAS
    if (n_gpu_layers > n_layer + 1) {
        ggml_cuda_assign_buffers_no_scratch(cache.v);
    }
    if (n_gpu_layers > n_layer + 2) {
        ggml_cuda_assign_buffers_no_scratch(cache.k);
    }
#endif // GGML_USE_CUBLAS

    return true;
}

struct llama_context_params llama_context_default_params() {
    struct llama_context_params result = {
        /*.seed                        =*/ LLAMA_DEFAULT_SEED,
        /*.n_ctx                       =*/ 512,
        /*.n_batch                     =*/ 512,
        /*.n_gqa                       =*/ 1,
        /*.rms_norm_eps                =*/ LLAMA_DEFAULT_RMS_EPS,
        /*.gpu_layers                  =*/ 0,
        /*.main_gpu                    =*/ 0,
        /*.tensor_split                =*/ nullptr,
        /*.rope_freq_base              =*/ 10000.0f,
        /*.rope_freq_scale             =*/ 1.0f,
        /*.progress_callback           =*/ nullptr,
        /*.progress_callback_user_data =*/ nullptr,
        /*.low_vram                    =*/ false,
        /*.mul_mat_q                   =*/ false,
        /*.f16_kv                      =*/ true,
        /*.logits_all                  =*/ false,
        /*.vocab_only                  =*/ false,
        /*.use_mmap                    =*/ true,
        /*.use_mlock                   =*/ false,
        /*.embedding                   =*/ false,
    };

    return result;
}
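
// Typical initialization sketch using these defaults (assuming the llama.h
// public API at this revision, e.g. llama_load_model_from_file /
// llama_new_context_with_model; the model path is hypothetical):
//
//     llama_backend_init(/*numa =*/ false);
//     struct llama_context_params params = llama_context_default_params();
//     params.n_ctx = 2048;
//     struct llama_model   * model = llama_load_model_from_file("./7B/ggml-model-q4_0.bin", params);
//     struct llama_context * lctx  = llama_new_context_with_model(model, params);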
struct llama_model_quantize_params llama_model_quantize_default_params() {
    struct llama_model_quantize_params result = {
        /*.nthread                     =*/ 0,
        /*.ftype                       =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
        /*.allow_requantize            =*/ false,
        /*.quantize_output_tensor      =*/ true,
    };

    return result;
}

int llama_max_devices() {
    return LLAMA_MAX_DEVICES;
}

bool llama_mmap_supported() {
    return llama_mmap::SUPPORTED;
}

bool llama_mlock_supported() {
    return llama_mlock::SUPPORTED;
}

void llama_backend_init(bool numa) {
    ggml_time_init();

    // needed to initialize f16 tables
    {
        struct ggml_init_params params = { 0, NULL, false };
        struct ggml_context * ctx = ggml_init(params);
        ggml_free(ctx);
    }

    if (numa) {
        ggml_numa_init();
    }

#ifdef GGML_USE_MPI
    ggml_mpi_backend_init();
#endif
}

void llama_backend_free() {
#ifdef GGML_USE_MPI
    ggml_mpi_backend_free();
#endif
}

int64_t llama_time_us() {
    return ggml_time_us();
}

//
// model loading
//

static const char * llama_file_version_name(llama_file_version version) {
    switch (version) {
        case LLAMA_FILE_VERSION_GGML:    return "'ggml' (old version with low tokenizer quality and no mmap support)";
        case LLAMA_FILE_VERSION_GGMF_V1: return "ggmf v1 (old version with no mmap support)";
        case LLAMA_FILE_VERSION_GGJT_V1: return "ggjt v1 (pre #1405)";
        case LLAMA_FILE_VERSION_GGJT_V2: return "ggjt v2 (pre #1508)";
        case LLAMA_FILE_VERSION_GGJT_V3: return "ggjt v3 (latest)";
    }

    return "unknown";
}

static const char * llama_ftype_name(enum llama_ftype ftype) {
    switch (ftype) {
        case LLAMA_FTYPE_ALL_F32:     return "all F32";
        case LLAMA_FTYPE_MOSTLY_F16:  return "mostly F16";
        case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0";
        case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1";
        case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
                                      return "mostly Q4_1, some F16";
        case LLAMA_FTYPE_MOSTLY_Q5_0: return "mostly Q5_0";
        case LLAMA_FTYPE_MOSTLY_Q5_1: return "mostly Q5_1";
        case LLAMA_FTYPE_MOSTLY_Q8_0: return "mostly Q8_0";

        // K-quants
        case LLAMA_FTYPE_MOSTLY_Q2_K:   return "mostly Q2_K";
        case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "mostly Q3_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "mostly Q3_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "mostly Q3_K - Large";
        case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "mostly Q4_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "mostly Q4_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "mostly Q5_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "mostly Q5_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q6_K:   return "mostly Q6_K";

        default: return "unknown, may not work";
    }
}

static const char * llama_model_type_name(e_model type) {
    switch (type) {
        case MODEL_3B:  return "3B";
        case MODEL_7B:  return "7B";
        case MODEL_13B: return "13B";
        case MODEL_30B: return "30B";
        case MODEL_34B: return "34B";
        case MODEL_65B: return "65B";
        case MODEL_70B: return "70B";
        default: LLAMA_ASSERT(false);
    }
}

static void llama_model_load_internal(
        const std::string & fname,
        llama_model & model,
        llama_vocab & vocab,
        int n_ctx,
        int n_batch,
        int n_gqa,
        float rms_norm_eps,
        int n_gpu_layers,
        int main_gpu,
        const float * tensor_split,
        const bool mul_mat_q,
        float rope_freq_base,
        float rope_freq_scale,
        bool low_vram,
        ggml_type memory_type,
        bool use_mmap,
        bool use_mlock,
        bool vocab_only,
        llama_progress_callback progress_callback,
        void * progress_callback_user_data) {
    model.t_start_us = ggml_time_us();

    std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname, use_mmap));

    vocab = std::move(ml->file_loader->vocab);
    model.hparams = ml->file_loader->hparams;
    model.n_gpu_layers = n_gpu_layers;
    llama_file_version file_version = ml->file_loader->file_version;

    auto & hparams = model.hparams;

    // TODO: read from file
    hparams.f_rms_norm_eps = rms_norm_eps;

    {
        switch (hparams.n_layer) {
            case 26: model.type = e_model::MODEL_3B;  break;
            case 32: model.type = e_model::MODEL_7B;  break;
            case 40: model.type = e_model::MODEL_13B; break;
            case 48: model.type = e_model::MODEL_34B; break;
            case 60: model.type = e_model::MODEL_30B; break;
            case 80: model.type = e_model::MODEL_65B; break;
            default:
                {
                    if (hparams.n_layer < 32) {
                        model.type = e_model::MODEL_7B;
                    }
                } break;
        }

        hparams.n_ctx = n_ctx;

        // LLaMAv2
        // TODO: temporary until GGUF
        LLAMA_ASSERT(hparams.n_head % n_gqa == 0);
        hparams.n_head_kv = hparams.n_head / n_gqa;
        if (model.type == e_model::MODEL_65B && n_gqa == 8) {
            LLAMA_LOG_WARN("%s: warning: assuming 70B model based on GQA == %d\n", __func__, n_gqa);
            model.type = e_model::MODEL_70B;
            hparams.f_ffn_mult = 1.3f; // from the params.json of the 70B model
        }

        hparams.rope_freq_base  = rope_freq_base;
        hparams.rope_freq_scale = rope_freq_scale;
    }

    // ref: https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/model.py#L194-L199
    const uint32_t n_ff_raw  = 2*(4*hparams.n_embd)/3;
    const uint32_t n_ff_mult = hparams.f_ffn_mult*n_ff_raw;
    const uint32_t n_ff = ((n_ff_mult + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
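    // e.g. for 7B (n_embd = 4096, f_ffn_mult = 1.0, n_mult = 256):
    // n_ff_raw = 2*16384/3 = 10922, rounded up to a multiple of 256 -> n_ff = 11008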
  971. //const uint32_t n_ff = 28672;
  972. {
  973. LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(file_version));
  974. LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
  975. LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx);
  976. LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
  977. LLAMA_LOG_INFO("%s: n_mult = %u\n", __func__, hparams.n_mult);
  978. LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
  979. LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
  980. LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
  981. LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
  982. LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
  983. LLAMA_LOG_INFO("%s: rnorm_eps = %.1e\n", __func__, hparams.f_rms_norm_eps);
  984. LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, n_ff);
  985. LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base);
  986. LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale);
  987. LLAMA_LOG_INFO("%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype));
  988. LLAMA_LOG_INFO("%s: model size = %s\n", __func__, llama_model_type_name(model.type));
  989. }
  990. if (file_version < LLAMA_FILE_VERSION_GGJT_V2) {
  991. if (hparams.ftype != LLAMA_FTYPE_ALL_F32 &&
  992. hparams.ftype != LLAMA_FTYPE_MOSTLY_F16 &&
  993. hparams.ftype != LLAMA_FTYPE_MOSTLY_Q8_0) {
  994. throw std::runtime_error(format("this format is no longer supported (see https://github.com/ggerganov/llama.cpp/pull/1405)"));
  995. }
  996. }
  997. if (file_version < LLAMA_FILE_VERSION_GGJT_V3) {
  998. if (hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ||
  999. hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_1 ||
  1000. hparams.ftype == LLAMA_FTYPE_MOSTLY_Q8_0) {
  1001. throw std::runtime_error(format("this format is no longer supported (see https://github.com/ggerganov/llama.cpp/pull/1508)"));
  1002. }
  1003. }
  1004. if (vocab_only) {
  1005. return;
  1006. }
  1007. auto & ctx = model.ctx;
  1008. size_t ctx_size;
  1009. size_t mmapped_size;
  1010. ml->calc_sizes(&ctx_size, &mmapped_size);
  1011. LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0);
  1012. // create the ggml context
  1013. {
  1014. model.buf.resize(ctx_size);
  1015. if (use_mlock) {
  1016. model.mlock_buf.init (model.buf.addr);
  1017. model.mlock_buf.grow_to(model.buf.size);
  1018. }
  1019. struct ggml_init_params params = {
  1020. /*.mem_size =*/ model.buf.size,
  1021. /*.mem_buffer =*/ model.buf.addr,
  1022. /*.no_alloc =*/ ml->use_mmap,
  1023. };
  1024. model.ctx = ggml_init(params);
  1025. if (!model.ctx) {
  1026. throw std::runtime_error(format("ggml_init() failed"));
  1027. }
  1028. }
  1029. (void) main_gpu;
  1030. (void) mul_mat_q;
  1031. #if defined(GGML_USE_CUBLAS)
  1032. LLAMA_LOG_INFO("%s: using CUDA for GPU acceleration\n", __func__);
  1033. ggml_cuda_set_main_device(main_gpu);
  1034. ggml_cuda_set_mul_mat_q(mul_mat_q);
  1035. #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
  1036. #define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT
  1037. #elif defined(GGML_USE_CLBLAST)
  1038. LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__);
  1039. #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
  1040. #define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU
  1041. #else
  1042. #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CPU
  1043. #define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_CPU
  1044. #endif

    // prepare memory for the weights
    size_t vram_weights = 0;
    size_t vram_scratch = 0;
    {
        const uint32_t n_embd     = hparams.n_embd;
        const uint32_t n_embd_gqa = hparams.n_embd_gqa();
        const uint32_t n_layer    = hparams.n_layer;
        const uint32_t n_vocab    = hparams.n_vocab;

        ml->ggml_ctx = ctx;

        model.tok_embeddings = ml->get_tensor("tok_embeddings.weight", {n_embd, n_vocab}, GGML_BACKEND_CPU);

        // "output" tensor
        {
            ggml_backend backend_norm;
            ggml_backend backend_output;
            if (n_gpu_layers > int(n_layer)) { // NOLINT
                // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
                // on Windows however this is detrimental unless everything is on the GPU
#ifndef _WIN32
                backend_norm = low_vram ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
#else
                backend_norm = low_vram || n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
#endif // _WIN32

                backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
            } else {
                backend_norm   = GGML_BACKEND_CPU;
                backend_output = GGML_BACKEND_CPU;
            }

            model.norm   = ml->get_tensor("norm.weight",   {n_embd},          backend_norm);
            model.output = ml->get_tensor("output.weight", {n_embd, n_vocab}, backend_output);
            if (backend_norm == GGML_BACKEND_GPU) {
                vram_weights += ggml_nbytes(model.norm);
            }
            if (backend_output == GGML_BACKEND_GPU_SPLIT) {
                vram_weights += ggml_nbytes(model.output);
            }
        }

        const int i_gpu_start = n_layer - n_gpu_layers;
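        // e.g. for a 32-layer model: n_gpu_layers =  0 -> i_gpu_start = 32, every layer stays on the CPU;
        //                            n_gpu_layers = 20 -> i_gpu_start = 12, layers 12..31 are offloaded;
        //                            n_gpu_layers >= 32 -> i_gpu_start <= 0, all repeating layers are offloaded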
        model.layers.resize(n_layer);
        for (uint32_t i = 0; i < n_layer; ++i) {
            const ggml_backend backend       = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
            const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT

            auto & layer = model.layers[i];

            std::string layers_i = "layers." + std::to_string(i);

            layer.attention_norm = ml->get_tensor(layers_i + ".attention_norm.weight", {n_embd}, backend);

            layer.wq = ml->get_tensor(layers_i + ".attention.wq.weight", {n_embd, n_embd},     backend_split);
            layer.wk = ml->get_tensor(layers_i + ".attention.wk.weight", {n_embd, n_embd_gqa}, backend_split);
            layer.wv = ml->get_tensor(layers_i + ".attention.wv.weight", {n_embd, n_embd_gqa}, backend_split);
            layer.wo = ml->get_tensor(layers_i + ".attention.wo.weight", {n_embd, n_embd},     backend_split);

            layer.ffn_norm = ml->get_tensor(layers_i + ".ffn_norm.weight", {n_embd}, backend);

            layer.w1 = ml->get_tensor(layers_i + ".feed_forward.w1.weight", {n_embd,   n_ff}, backend_split);
            layer.w2 = ml->get_tensor(layers_i + ".feed_forward.w2.weight", {  n_ff, n_embd}, backend_split);
            layer.w3 = ml->get_tensor(layers_i + ".feed_forward.w3.weight", {n_embd,   n_ff}, backend_split);

            if (backend == GGML_BACKEND_GPU) {
                vram_weights +=
                    ggml_nbytes(layer.attention_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
                    ggml_nbytes(layer.wv)             + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
                    ggml_nbytes(layer.w1)             + ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3);
            }
        }
    }

    ml->done_getting_tensors();

    // print memory requirements
    {
        const size_t scale = memory_type == GGML_TYPE_F32 ? 2 : 1;

        // this is the total memory required to run the inference
        size_t mem_required =
            ctx_size +
            mmapped_size - vram_weights; // weights in VRAM not in memory

#ifndef LLAMA_USE_ALLOCATOR
        mem_required +=
            MEM_REQ_SCRATCH0(hparams.n_ctx).at(model.type) +
            MEM_REQ_SCRATCH1().at(model.type) +
            MEM_REQ_EVAL().at(model.type);
#endif

        // this is the memory required by one llama_state
        const size_t mem_required_state =
            scale*hparams.kv_size();

        LLAMA_LOG_INFO("%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
                mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
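
        // rough sketch, assuming kv_size() is the f16 K+V cache footprint
        // (2 * n_layer * n_ctx * n_embd_gqa * 2 bytes): a 7B-class model
        // (n_embd = 4096, n_layer = 32) at n_ctx = 512 needs ~256 MB per state,
        // and twice that when memory_type is GGML_TYPE_F32 (scale = 2)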

        (void) vram_scratch;
        (void) n_batch;
#ifdef GGML_USE_CUBLAS
        if (low_vram) {
            LLAMA_LOG_INFO("%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__);
            ggml_cuda_set_scratch_size(0); // disable scratch
        } else {
            const size_t vram_scratch_base        = VRAM_REQ_SCRATCH_BASE().at(model.type);
            const size_t vram_scratch_per_context = VRAM_REQ_SCRATCH_PER_CONTEXT().at(model.type);
            vram_scratch = n_batch * (vram_scratch_base + n_ctx * vram_scratch_per_context);
            ggml_cuda_set_scratch_size(vram_scratch);
            if (n_gpu_layers > 0) {
                LLAMA_LOG_INFO("%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer\n",
                        __func__, vram_scratch_base / kB, vram_scratch_per_context,
                        (vram_scratch + MB - 1) / MB); // round up
            }
        }
#endif // GGML_USE_CUBLAS

#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
        const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));

        LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
        if (n_gpu_layers > (int) hparams.n_layer) {
            LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
        }
        size_t vram_kv_cache = 0;

#ifdef GGML_USE_CUBLAS
        const int max_backend_supported_layers = hparams.n_layer + 3;
        const int max_offloadable_layers = low_vram ? hparams.n_layer + 1 : hparams.n_layer + 3;
        if (n_gpu_layers > (int) hparams.n_layer + 1) {
            if (low_vram) {
                LLAMA_LOG_INFO("%s: cannot offload v cache to GPU due to low VRAM option\n", __func__);
            } else {
                LLAMA_LOG_INFO("%s: offloading v cache to GPU\n", __func__);
                vram_kv_cache += hparams.kv_size() / 2;
            }
        }
        if (n_gpu_layers > (int) hparams.n_layer + 2) {
            if (low_vram) {
                LLAMA_LOG_WARN("%s: cannot offload k cache to GPU due to low VRAM option\n", __func__);
            } else {
                LLAMA_LOG_INFO("%s: offloading k cache to GPU\n", __func__);
                vram_kv_cache += hparams.kv_size() / 2;
            }
        }
#elif defined(GGML_USE_CLBLAST)
        const int max_backend_supported_layers = hparams.n_layer + 1;
        const int max_offloadable_layers       = hparams.n_layer + 1;
#endif // GGML_USE_CUBLAS

        LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n",
                __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
        LLAMA_LOG_INFO("%s: total VRAM used: %zu MB\n",
                __func__, (vram_weights + vram_scratch + vram_kv_cache + MB - 1) / MB); // round up
#else
        (void) n_gpu_layers;
#endif // defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
    }

    // populate `tensors_by_name`
    for (llama_load_tensor & lt : ml->tensors_map.tensors) {
        model.tensors_by_name.emplace_back(lt.name, lt.ggml_tensor);
    }

    (void) tensor_split;
#if defined(GGML_USE_CUBLAS)
    {
        ggml_cuda_set_tensor_split(tensor_split);
    }
#endif

    ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? &model.mlock_mmap : NULL);

    if (progress_callback) {
        progress_callback(1.0f, progress_callback_user_data);
    }

    model.mapping = std::move(ml->mapping);

    // loading time will be recalculated after the first eval, so
    // we take page faults deferred by mmap() into consideration
    model.t_load_us = ggml_time_us() - model.t_start_us;
}

static bool llama_model_load(
        const std::string & fname,
        llama_model & model,
        llama_vocab & vocab,
        int n_ctx,
        int n_batch,
        int n_gqa,
        float rms_norm_eps,
        int n_gpu_layers,
        int main_gpu,
        const float * tensor_split,
        const bool mul_mat_q,
        float rope_freq_base,
        float rope_freq_scale,
        bool low_vram,
        ggml_type memory_type,
        bool use_mmap,
        bool use_mlock,
        bool vocab_only,
        llama_progress_callback progress_callback,
        void * progress_callback_user_data) {
    try {
        llama_model_load_internal(fname, model, vocab, n_ctx, n_batch, n_gqa, rms_norm_eps, n_gpu_layers,
                main_gpu, tensor_split, mul_mat_q, rope_freq_base, rope_freq_scale, low_vram, memory_type,
                use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data);
        return true;
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("error loading model: %s\n", err.what());
        return false;
    }
}

static struct ggml_cgraph * llama_build_graph(
        llama_context & lctx,
        const llama_token * tokens,
        const float * embd,
        int n_tokens,
        int n_past) {
    LLAMA_ASSERT((!tokens && embd) || (tokens && !embd));

    const int N = n_tokens;

    const auto & model   = lctx.model;
    const auto & hparams = model.hparams;

    const auto & kv_self = lctx.kv_self;

    LLAMA_ASSERT(!!kv_self.ctx);

    const int64_t n_embd      = hparams.n_embd;
    const int64_t n_layer     = hparams.n_layer;
    const int64_t n_ctx       = hparams.n_ctx;
    const int64_t n_head      = hparams.n_head;
    const int64_t n_head_kv   = hparams.n_head_kv;
    const int64_t n_embd_head = hparams.n_embd_head();
    const int64_t n_embd_gqa  = hparams.n_embd_gqa();

    LLAMA_ASSERT(n_embd_head == hparams.n_rot);

    const float freq_base    = hparams.rope_freq_base;
    const float freq_scale   = hparams.rope_freq_scale;
    const float rms_norm_eps = hparams.f_rms_norm_eps;

    const int n_gpu_layers = model.n_gpu_layers;

    auto & mem_per_token = lctx.mem_per_token;
    auto & buf_compute   = lctx.buf_compute;

    struct ggml_init_params params = {
        /*.mem_size   =*/ buf_compute.size,
        /*.mem_buffer =*/ buf_compute.addr,
        /*.no_alloc   =*/ false,
    };

#ifdef LLAMA_USE_ALLOCATOR
    params.no_alloc = true;
#endif

    struct ggml_context * ctx0 = ggml_init(params);

    ggml_cgraph * gf = ggml_new_graph(ctx0);

    struct ggml_tensor * cur;
    struct ggml_tensor * inpL;

    if (tokens) {
        struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);

#ifdef LLAMA_USE_ALLOCATOR
        ggml_allocr_alloc(lctx.alloc, inp_tokens);
        if (!ggml_allocr_is_measure(lctx.alloc)) {
            memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
        }
#else
        memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
#endif
        ggml_set_name(inp_tokens, "inp_tokens");

        inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens);
    } else {
#ifdef GGML_USE_MPI
        GGML_ASSERT(false && "not implemented");
#endif

        inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N);

#ifdef LLAMA_USE_ALLOCATOR
        ggml_allocr_alloc(lctx.alloc, inpL);
        if (!ggml_allocr_is_measure(lctx.alloc)) {
            memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
        }
#else
        memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
#endif
    }

    const int i_gpu_start = n_layer - n_gpu_layers;
    (void) i_gpu_start;

    // offload functions set the tensor output backend to GPU
    // tensors are GPU-accelerated if any input or the output has been offloaded
    //
    // with the low VRAM option VRAM scratch is disabled in llama_load_model_internal
    // in that case ggml_cuda_assign_buffers has no effect
    offload_func_t offload_func_nr = llama_nop; // nr = non-repeating
    offload_func_t offload_func_kq = llama_nop;
    offload_func_t offload_func_v  = llama_nop;

#ifdef GGML_USE_CUBLAS
    if (n_gpu_layers > n_layer) {
        offload_func_nr = ggml_cuda_assign_buffers;
    }
    if (n_gpu_layers > n_layer + 1) {
        offload_func_v  = ggml_cuda_assign_buffers;
    }
    if (n_gpu_layers > n_layer + 2) {
        offload_func_kq = ggml_cuda_assign_buffers;
    }
#endif // GGML_USE_CUBLAS

    struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
#ifdef LLAMA_USE_ALLOCATOR
    ggml_allocr_alloc(lctx.alloc, KQ_scale);
    if (!ggml_allocr_is_measure(lctx.alloc)) {
        ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
    }
#else
    ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
#endif
    ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
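
    // the usual attention scaling: scores are divided by sqrt(n_embd_head) so their
    // variance stays roughly constant regardless of head size; e.g. with n_embd = 4096
    // and n_head = 32, n_embd_head = 128 and KQ_scale = 1/sqrt(128) ~= 0.0884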

    for (int il = 0; il < n_layer; ++il) {
        ggml_format_name(inpL, "layer_inp_%d", il);

        offload_func_t offload_func = llama_nop;

#ifdef GGML_USE_CUBLAS
        if (il >= i_gpu_start) {
            offload_func = ggml_cuda_assign_buffers;
        }
#endif // GGML_USE_CUBLAS

        struct ggml_tensor * inpSA = inpL;

        lctx.use_buf(ctx0, 0);

        // norm
        {
            cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps);
            offload_func(cur);
            ggml_set_name(cur, "rms_norm_0");

            // cur = cur*attention_norm(broadcasted)
            cur = ggml_mul(ctx0, cur, model.layers[il].attention_norm);
            offload_func(cur);
            ggml_set_name(cur, "attention_norm_0");
        }

        // self-attention
        {
            // compute Q and K and RoPE them
            struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
            offload_func_kq(tmpk);
            ggml_set_name(tmpk, "tmpk");

            struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
            offload_func_kq(tmpq);
            ggml_set_name(tmpq, "tmpq");

            struct ggml_tensor * Kcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
            offload_func_kq(Kcur);
            ggml_set_name(Kcur, "Kcur");

            struct ggml_tensor * Qcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
            offload_func_kq(Qcur);
            ggml_set_name(Qcur, "Qcur");

            // store key and value to memory
            {
                // compute the transposed [N, n_embd_gqa] V matrix
                struct ggml_tensor * tmpv = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
                offload_func_v(tmpv);
                ggml_set_name(tmpv, "tmpv");

                struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd_gqa, N));
                offload_func_v(Vcur);
                ggml_set_name(Vcur, "Vcur");

                struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + n_past));
                offload_func_kq(k);
                ggml_set_name(k, "k");

                struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd_gqa,
                        (   n_ctx)*ggml_element_size(kv_self.v),
                        (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + n_past*ggml_element_size(kv_self.v));
                offload_func_v(v);
                ggml_set_name(v, "v");

                // important: storing RoPE-ed version of K in the KV cache!
                ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
                ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
            }
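
            // cache layout (element counts, per layer): k is [n_embd_gqa, n_ctx] stored
            // row by row, so the write above lands at offset n_embd_gqa*(il*n_ctx + n_past);
            // v is stored transposed, one row of n_ctx time steps per channel, which is
            // what lets the ggml_view_3d below slice per-head V without another transpose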
            struct ggml_tensor * Q =
                ggml_permute(ctx0,
                        Qcur,
                        0, 2, 1, 3);
            offload_func_kq(Q);
            ggml_set_name(Q, "Q");

            struct ggml_tensor * K =
                ggml_permute(ctx0,
                        ggml_reshape_3d(ctx0,
                            ggml_view_1d(ctx0, kv_self.k, (n_past + N)*n_embd_gqa, il*n_ctx*ggml_element_size(kv_self.k)*n_embd_gqa),
                            n_embd_head, n_head_kv, n_past + N),
                        0, 2, 1, 3);
            offload_func_kq(K);
            ggml_set_name(K, "K");

            // K * Q
            struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
            offload_func_kq(KQ);
            ggml_set_name(KQ, "KQ");

            // KQ_scaled = KQ / sqrt(n_embd_head)
            // KQ_scaled shape [n_past + N, N, n_head, 1]
            struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale);
            offload_func_kq(KQ_scaled);
            ggml_set_name(KQ_scaled, "KQ_scaled");

            // KQ_masked = mask_past(KQ_scaled)
            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
            offload_func_kq(KQ_masked);
            ggml_set_name(KQ_masked, "KQ_masked");

            // KQ = soft_max(KQ_masked)
            struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
            offload_func_v(KQ_soft_max);
            ggml_set_name(KQ_soft_max, "KQ_soft_max");

            // split cached V into n_head heads
            struct ggml_tensor * V =
                ggml_view_3d(ctx0, kv_self.v,
                        n_past + N, n_embd_head, n_head_kv,
                        n_ctx*ggml_element_size(kv_self.v),
                        n_ctx*ggml_element_size(kv_self.v)*n_embd_head,
                        n_ctx*ggml_element_size(kv_self.v)*n_embd_gqa*il);
            offload_func_v(V);
            ggml_set_name(V, "V");
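
            // shape walk-through for this block, in ggml ne order (ne[0] is contiguous);
            // n_head_kv < n_head is the GQA case, where K/V heads are shared across query head groups:
            //   Q          [n_embd_head, N,           n_head   ]
            //   K          [n_embd_head, n_past + N,  n_head_kv]
            //   KQ         [n_past + N,  N,           n_head   ]
            //   V          [n_past + N,  n_embd_head, n_head_kv]
            //   KQV        [n_embd_head, N,           n_head   ]
            //   KQV_merged [n_embd_head, n_head,      N        ] -> copied to [n_embd, N]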
#if 1
            struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
            offload_func_v(KQV);
            ggml_set_name(KQV, "KQV");
#else
            // make V contiguous in memory to speed up the matmul, however we waste time on the copy
            // on M1 this is faster for the perplexity computation, but ~5% slower for the single-token generation
            // is there a better way?
            struct ggml_tensor * V_cont = ggml_cpy(ctx0, V, ggml_new_tensor_3d(ctx0, kv_self.v->type, n_past + N, n_embd_head, n_head));
            struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_cont, KQ_soft_max);
#endif

            // KQV_merged = KQV.permute(0, 2, 1, 3)
            struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
            offload_func_v(KQV_merged);
            ggml_set_name(KQV_merged, "KQV_merged");

            // cur = KQV_merged.contiguous().view(n_embd, N)
            cur = ggml_cpy(ctx0,
                    KQV_merged,
                    ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
            offload_func_v(cur);
            ggml_set_name(cur, "KQV_merged_contiguous");

            // projection (no bias)
            cur = ggml_mul_mat(ctx0,
                    model.layers[il].wo,
                    cur);
            offload_func(cur);
            ggml_set_name(cur, "result_wo");
        }

        lctx.use_buf(ctx0, 1);

        struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);
        offload_func(inpFF);
        ggml_set_name(inpFF, "inpFF");

        // feed-forward network
        {
            // norm
            {
                cur = ggml_rms_norm(ctx0, inpFF, rms_norm_eps);
                offload_func(cur);
                ggml_set_name(cur, "rms_norm_1");

                // cur = cur*ffn_norm(broadcasted)
                cur = ggml_mul(ctx0, cur, model.layers[il].ffn_norm);
                offload_func(cur);
                ggml_set_name(cur, "ffn_norm");
            }

            struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
                    model.layers[il].w3,
                    cur);
            offload_func(tmp);
            ggml_set_name(tmp, "result_w3");

            cur = ggml_mul_mat(ctx0,
                    model.layers[il].w1,
                    cur);
            offload_func(cur);
            ggml_set_name(cur, "result_w1");

            // SILU activation
            cur = ggml_silu(ctx0, cur);
            offload_func(cur);
            ggml_set_name(cur, "silu");

            cur = ggml_mul(ctx0, cur, tmp);
            offload_func(cur);
            ggml_set_name(cur, "silu_x_result_w3");

            cur = ggml_mul_mat(ctx0,
                    model.layers[il].w2,
                    cur);
            offload_func(cur);
            ggml_set_name(cur, "result_w2");
        }
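
        // taken together, the block above is the SwiGLU FFN used by LLaMA:
        //   cur = w2 * (silu(w1 * x) . (w3 * x))
        // where . denotes element-wise multiplication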
        cur = ggml_add(ctx0, cur, inpFF);
        offload_func(cur);
        ggml_set_name(cur, "inpFF_+_result_w2");

        // input for next layer
        inpL = cur;
    }

    lctx.use_buf(ctx0, 0);

    // norm
    {
        cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps);
        offload_func_nr(cur);
        ggml_set_name(cur, "rms_norm_2");

        // cur = cur*norm(broadcasted)
        cur = ggml_mul(ctx0, cur, model.norm);
        // offload_func_nr(cur); // TODO CPU + GPU mirrored backend
        ggml_set_name(cur, "result_norm");
    }

    // lm_head
    cur = ggml_mul_mat(ctx0, model.output, cur);
    ggml_set_name(cur, "result_output");

    lctx.use_buf(ctx0, -1);

    // logits -> probs
    //cur = ggml_soft_max_inplace(ctx0, cur);

    ggml_build_forward_expand(gf, cur);

    if (mem_per_token == 0) {
        mem_per_token = ggml_used_mem(ctx0)/N;
    }

#if 0
    LLAMA_LOG_INFO("\n%s: used_mem: eval ctx %.3f MB, scratch %.3f MB %.3f MB, work buf %.3f MB, n_past = %d, N = %d\n", __func__,
            ggml_used_mem(ctx0)/1024.0/1024.0,
            lctx.get_buf_max_mem(0)/1024.0/1024.0,
            lctx.get_buf_max_mem(1)/1024.0/1024.0,
            lctx.work_buffer.size()/1024.0/1024.0,
            n_past, N);
#endif

    ggml_free(ctx0);

    return gf;
}

// evaluate the transformer
//
//   - lctx:      llama context
//   - tokens:    new batch of tokens to process
//   - embd:      embeddings input
//   - n_tokens:  number of tokens
//   - n_past:    the context size so far
//   - n_threads: number of threads to use
//
static bool llama_eval_internal(
        llama_context & lctx,
        const llama_token * tokens,
        const float * embd,
        int n_tokens,
        int n_past,
        int n_threads,
        const char * cgraph_fname) {
    LLAMA_ASSERT((!tokens && embd) || (tokens && !embd));

    const int64_t t_start_us = ggml_time_us();

#ifdef GGML_USE_MPI
    ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
#endif

    const int N = n_tokens;

    const auto & model   = lctx.model;
    const auto & hparams = model.hparams;

    const auto & kv_self = lctx.kv_self;

    LLAMA_ASSERT(!!kv_self.ctx);

    const int64_t n_embd  = hparams.n_embd;
    const int64_t n_vocab = hparams.n_vocab;

#ifdef LLAMA_USE_ALLOCATOR
    ggml_allocr_reset(lctx.alloc);
#endif

    ggml_cgraph * gf = llama_build_graph(lctx, tokens, embd, n_tokens, n_past);

#ifdef LLAMA_USE_ALLOCATOR
    ggml_allocr_alloc_graph(lctx.alloc, gf);
#endif

    // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);

    // for big prompts, if BLAS is enabled, it is better to use only one thread
    // otherwise, the threads spin-lock waiting for the BLAS calls and degrade performance
    n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads;

    struct ggml_tensor * res        = gf->nodes[gf->n_nodes - 1];
    struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];

    LLAMA_ASSERT(strcmp(res->name,        "result_output") == 0);
    LLAMA_ASSERT(strcmp(embeddings->name, "result_norm")   == 0);

#ifdef GGML_USE_MPI
    const int64_t n_layer = hparams.n_layer;
    ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);
#endif

#ifdef GGML_USE_METAL
    if (lctx.ctx_metal && N == 1) {
        // TODO: disabled until #2413 is resolved
        //if (!ggml_metal_if_optimized(lctx.ctx_metal)) {
        //    ggml_metal_graph_find_concurrency(lctx.ctx_metal, gf);
        //}
        ggml_metal_set_n_cb     (lctx.ctx_metal, n_threads);
        ggml_metal_graph_compute(lctx.ctx_metal, gf);
        ggml_metal_get_tensor   (lctx.ctx_metal, res);
        if (!lctx.embedding.empty()) {
            ggml_metal_get_tensor(lctx.ctx_metal, embeddings);
        }
    } else {
        // IMPORTANT:
        // Since we don't have efficient Matrix x Matrix Metal multiplication yet, we fall back to vanilla
        // ggml_graph_compute(). It uses Apple's Accelerate CBLAS API which takes advantage of the ANE or the AMX
        // coprocessor.
        //
        // When we implement Matrix x Matrix Metal multiplication, we can avoid this branch.
        // But for now, we have focused only on Matrix x Vector Metal multiplication.
        //
        // TODO: avoid these syncs via shared memory (ref #1696)
        //
        if (lctx.ctx_metal) {
            // We need to sync the GPU KV cache with the CPU KV cache
            ggml_metal_get_tensor(lctx.ctx_metal, kv_self.k);
            ggml_metal_get_tensor(lctx.ctx_metal, kv_self.v);
        }

        ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);
    }
#else
    ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);
#endif

#ifdef GGML_USE_MPI
    ggml_mpi_graph_compute_post(lctx.ctx_mpi, gf, n_layer);
#endif

    // update kv token count
    lctx.kv_self.n = n_past + N;

    if (cgraph_fname) {
        ggml_graph_export(gf, cgraph_fname);
    }

#ifdef GGML_PERF
    // print timing information per ggml operation (for debugging purposes)
    // requires GGML_PERF to be defined
    ggml_graph_print(gf);
#endif

    // plot the computation graph in dot format (for debugging purposes)
    //if (n_past%100 == 0) {
    //    ggml_graph_dump_dot(gf, NULL, "llama.dot");
    //}

    // extract logits
    {
        auto & logits_out = lctx.logits;

        if (lctx.logits_all) {
            logits_out.resize(n_vocab * N);
            memcpy(logits_out.data(), (float *) ggml_get_data(res), sizeof(float)*n_vocab*N);
        } else {
            // return result for just the last token
            logits_out.resize(n_vocab);
            memcpy(logits_out.data(), (float *) ggml_get_data(res) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
        }
    }

    // extract embeddings
    if (!lctx.embedding.empty()) {
        auto & embedding_out = lctx.embedding;

        embedding_out.resize(n_embd);
        memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd);
    }

    // measure the performance only for the single-token evals
    if (N == 1) {
        lctx.t_eval_us += ggml_time_us() - t_start_us;
        lctx.n_eval++;
    }
    else if (N > 1) {
        lctx.t_p_eval_us += ggml_time_us() - t_start_us;
        lctx.n_p_eval += N;
    }

    return true;
}

//
// tokenizer
//

static size_t utf8_len(char src) {
    const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
    uint8_t highbits = static_cast<uint8_t>(src) >> 4;
    return lookup[highbits];
}
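
// the table is indexed by the high nibble of the leading byte:
//   0x0..0x7 (0xxxxxxx) -> 1 byte (ASCII)
//   0x8..0xB (10xxxxxx) -> 1, so a stray continuation byte advances by one instead of stalling
//   0xC..0xD (110xxxxx) -> 2 bytes, 0xE (1110xxxx) -> 3 bytes, 0xF (11110xxx) -> 4 bytes
// e.g. utf8_len('\xC3') == 2, the first byte of a 2-byte sequence such as "é" (0xC3 0xA9)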

struct llama_sp_symbol {
    using index = int;
    index prev;
    index next;
    const char * text;
    size_t n;
};

static_assert(std::is_trivially_copyable<llama_sp_symbol>::value, "llama_sp_symbol is not trivially copyable");

struct llama_sp_bigram {
    struct comparator {
        bool operator()(llama_sp_bigram & l, llama_sp_bigram & r) {
            return (l.score < r.score) || (l.score == r.score && l.left > r.left);
        }
    };
    using queue_storage = std::vector<llama_sp_bigram>;
    using queue = std::priority_queue<llama_sp_bigram, queue_storage, comparator>;
    llama_sp_symbol::index left;
    llama_sp_symbol::index right;
    float score;
    size_t size;
};

// original implementation:
// https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
struct llama_tokenizer {
    llama_tokenizer(const llama_vocab & vocab): vocab_(vocab) {}

    void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
        // split string into utf8 chars
        int index = 0;
        size_t offs = 0;
        while (offs < text.size()) {
            llama_sp_symbol sym;
            size_t char_len = std::min(text.size() - offs, utf8_len(text[offs]));
            sym.text = text.c_str() + offs;
            sym.n = char_len;
            offs += char_len;
            sym.prev = index - 1;
            sym.next = offs == text.size() ? -1 : index + 1;
            index++;
            symbols_.emplace_back(sym);
        }

        // seed the work queue with all possible 2-character tokens.
        for (size_t i = 1; i < symbols_.size(); ++i) {
            try_add_bigram(i - 1, i);
        }

        // keep substituting the highest scoring pairs for as long as we can.
        while (!work_queue_.empty()) {
            auto bigram = work_queue_.top();
            work_queue_.pop();

            auto & left_sym = symbols_[bigram.left];
            auto & right_sym = symbols_[bigram.right];

            // if one of the symbols already got merged, skip it.
            if (left_sym.n == 0 || right_sym.n == 0 ||
                left_sym.n + right_sym.n != bigram.size) {
                continue;
            }

            // merge the right sym into the left one
            left_sym.n += right_sym.n;
            right_sym.n = 0;

            //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);

            // remove the right sym from the chain
            left_sym.next = right_sym.next;
            if (right_sym.next >= 0) {
                symbols_[right_sym.next].prev = bigram.left;
            }

            // find more substitutions
            try_add_bigram(left_sym.prev, bigram.left);
            try_add_bigram(bigram.left, left_sym.next);
        }

        for (int i = 0; i != -1; i = symbols_[i].next) {
            auto & symbol = symbols_[i];
            auto token = vocab_.token_to_id.find(std::string(symbol.text, symbol.n));

            if (token == vocab_.token_to_id.end()) {
                // output any symbols that did not form tokens as bytes.
                for (int j = 0; j < (int) symbol.n; ++j) {
                    // NOTE: old version, before #2420 - not sure what the implications of this are
                    //llama_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3;
                    llama_vocab::id token_id = vocab_.token_to_id.at(std::string(1, symbol.text[j]));
                    output.push_back(token_id);
                }
            } else {
                output.push_back((*token).second);
            }
        }
    }

private:
    void try_add_bigram(int left, int right) {
        if (left == -1 || right == -1) {
            return;
        }

        const std::string text = std::string(symbols_[left].text, symbols_[left].n + symbols_[right].n);
        auto token = vocab_.token_to_id.find(text);

        if (token == vocab_.token_to_id.end()) {
            return;
        }

        if (static_cast<size_t>((*token).second) >= vocab_.id_to_token.size()) {
            return;
        }

        const auto & tok_score = vocab_.id_to_token[(*token).second];

        llama_sp_bigram bigram;
        bigram.left = left;
        bigram.right = right;
        bigram.score = tok_score.score;
        bigram.size = text.size();
        work_queue_.push(bigram);
    }

    const llama_vocab & vocab_;
    std::vector<llama_sp_symbol> symbols_;
    llama_sp_bigram::queue work_queue_;
};

static std::vector<llama_vocab::id> llama_tokenize(const llama_vocab & vocab, const std::string & text, bool bos) {
    llama_tokenizer tokenizer(vocab);
    std::vector<llama_vocab::id> output;

    if (text.empty()) {
        return output;
    }

    if (bos) {
        output.push_back(llama_token_bos());
    }

    tokenizer.tokenize(text, output);
    return output;
}

//
// grammar - internal
//

struct llama_grammar {
    const std::vector<std::vector<llama_grammar_element>>   rules;
    std::vector<std::vector<const llama_grammar_element *>> stacks;
};

struct llama_grammar_candidate {
    size_t           index;
    const uint32_t * code_points;
};

// NOTE: assumes valid utf8 (but checks for overrun)
// adds a terminating 0 for use as pointer
std::vector<uint32_t> decode_utf8(const char * src) {
    static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
    const char * pos = src;
    std::vector<uint32_t> code_points;
    while (*pos != 0) {
        uint8_t  first_byte = static_cast<uint8_t>(*pos);
        uint8_t  highbits   = first_byte >> 4;
        int      len        = lookup[highbits];
        uint8_t  mask       = (1 << (8 - len)) - 1;
        uint32_t value      = first_byte & mask;
        const char * end    = pos + len; // may overrun!
        ++pos;
        for ( ; pos < end && *pos != 0; ++pos) {
            value = (value << 6) + (static_cast<uint8_t>(*pos) & 0x3F);
        }
        code_points.push_back(value);
    }
    code_points.push_back(0);
    return code_points;
}
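
// worked example: decode_utf8("\xC3\xA9") ("é")
//   first_byte = 0xC3, highbits = 0xC -> len = 2, mask = 0x3F, value = 0xC3 & 0x3F = 0x03
//   continuation: value = (0x03 << 6) + (0xA9 & 0x3F) = 0xE9 = U+00E9
// result: { 0xE9, 0 } (the trailing 0 is the sentinel the grammar code relies on)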

// returns true iff pos points to the end of one of the definitions of a rule
static bool llama_grammar_is_end_of_sequence(const llama_grammar_element * pos) {
    switch (pos->type) {
        case LLAMA_GRETYPE_END: return true;
        case LLAMA_GRETYPE_ALT: return true;
        default:                return false;
    }
}

// returns true iff chr satisfies the char range at pos (regular or inverse range)
// asserts that pos is pointing to a char range element
static std::pair<bool, const llama_grammar_element *> llama_grammar_match_char(
        const llama_grammar_element * pos,
        const uint32_t                chr) {
    bool found            = false;
    bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
    LLAMA_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT);

    do {
        if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
            // inclusive range, e.g. [a-z]
            found = found || (pos->value <= chr && chr <= pos[1].value);
            pos += 2;
        } else {
            // exact char match, e.g. [a] or "a"
            found = found || pos->value == chr;
            pos += 1;
        }
    } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);

    return std::make_pair(found == is_positive_char, pos);
}
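
// sketch, assuming the element layout produced by the GBNF parser: a class like [a-dX]
// is encoded as {CHAR 'a'} {CHAR_RNG_UPPER 'd'} {CHAR_ALT 'X'}; for chr = 'b' the first
// iteration matches the a..d range, the loop then consumes the 'X' alternative, and the
// returned pos points just past the whole range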
  1843. }
  1844. // transforms a grammar pushdown stack into N possible stacks, all ending
  1845. // at a character range (terminal element)
  1846. static void llama_grammar_advance_stack(
  1847. const std::vector<std::vector<llama_grammar_element>> & rules,
  1848. const std::vector<const llama_grammar_element *> & stack,
  1849. std::vector<std::vector<const llama_grammar_element *>> & new_stacks) {
  1850. if (stack.empty()) {
  1851. new_stacks.push_back(stack);
  1852. return;
  1853. }
  1854. const llama_grammar_element * pos = stack.back();
  1855. switch (pos->type) {
  1856. case LLAMA_GRETYPE_RULE_REF: {
  1857. const size_t rule_id = static_cast<size_t>(pos->value);
  1858. const llama_grammar_element * subpos = rules[rule_id].data();
  1859. do {
  1860. // init new stack without the top (pos)
  1861. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  1862. if (!llama_grammar_is_end_of_sequence(pos + 1)) {
  1863. // if this rule ref is followed by another element, add that to stack
  1864. new_stack.push_back(pos + 1);
  1865. }
  1866. if (!llama_grammar_is_end_of_sequence(subpos)) {
  1867. // if alternate is nonempty, add to stack
  1868. new_stack.push_back(subpos);
  1869. }
  1870. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  1871. while (!llama_grammar_is_end_of_sequence(subpos)) {
  1872. // scan to end of alternate def
  1873. subpos++;
  1874. }
  1875. if (subpos->type == LLAMA_GRETYPE_ALT) {
  1876. // there's another alternate def of this rule to process
  1877. subpos++;
  1878. } else {
  1879. break;
  1880. }
  1881. } while (true);
  1882. break;
  1883. }
  1884. case LLAMA_GRETYPE_CHAR:
  1885. case LLAMA_GRETYPE_CHAR_NOT:
  1886. new_stacks.push_back(stack);
  1887. break;
  1888. default:
  1889. // end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range
  1890. // (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on
  1891. // those
  1892. LLAMA_ASSERT(false);
  1893. }
  1894. }
  1895. // takes a set of possible pushdown stacks on a grammar, which are required to
  1896. // be positioned at a character range (see `llama_grammar_advance_stack`), and
  1897. // produces the N possible stacks if the given char is accepted at those
  1898. // positions
  1899. static std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
  1900. const std::vector<std::vector<llama_grammar_element>> & rules,
  1901. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  1902. const uint32_t chr) {
  1903. std::vector<std::vector<const llama_grammar_element *>> new_stacks;
  1904. for (const auto & stack : stacks) {
  1905. if (stack.empty()) {
  1906. continue;
  1907. }
  1908. auto match = llama_grammar_match_char(stack.back(), chr);
  1909. if (match.first) {
  1910. const llama_grammar_element * pos = match.second;
  1911. // update top of stack to next element, if any
  1912. std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
  1913. if (!llama_grammar_is_end_of_sequence(pos)) {
  1914. new_stack.push_back(pos);
  1915. }
  1916. llama_grammar_advance_stack(rules, new_stack, new_stacks);
  1917. }
  1918. }
  1919. return new_stacks;
  1920. }
  1921. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  1922. const std::vector<std::vector<llama_grammar_element>> & rules,
  1923. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  1924. const std::vector<llama_grammar_candidate> & candidates);
  1925. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
  1926. const std::vector<std::vector<llama_grammar_element>> & rules,
  1927. const std::vector<const llama_grammar_element *> & stack,
  1928. const std::vector<llama_grammar_candidate> & candidates) {
  1929. std::vector<llama_grammar_candidate> rejects;
  1930. if (stack.empty()) {
  1931. // accept nothing; EOS is handled elsewhere
  1932. rejects.insert(rejects.end(), candidates.begin(), candidates.end());
  1933. return rejects;
  1934. }
  1935. const llama_grammar_element * stack_pos = stack.back();
  1936. std::vector<llama_grammar_candidate> next_candidates;
  1937. for (auto tok : candidates) {
  1938. if (llama_grammar_match_char(stack_pos, tok.code_points[0]).first) {
  1939. if (tok.code_points[1] != 0) {
  1940. next_candidates.push_back({ tok.index, tok.code_points + 1 });
  1941. }
  1942. } else {
  1943. rejects.push_back(tok);
  1944. }
  1945. }
  1946. auto stack_pos_after = llama_grammar_match_char(stack_pos, 0).second;
  1947. // update top of stack to next element, if any
  1948. std::vector<const llama_grammar_element *> stack_after(stack.begin(), stack.end() - 1);
  1949. if (!llama_grammar_is_end_of_sequence(stack_pos_after)) {
  1950. stack_after.push_back(stack_pos_after);
  1951. }
  1952. std::vector<std::vector<const llama_grammar_element *>> next_stacks;
  1953. llama_grammar_advance_stack(rules, stack_after, next_stacks);
  1954. auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, next_candidates);
  1955. for (auto tok : next_rejects) {
  1956. rejects.push_back({ tok.index, tok.code_points - 1 });
  1957. }
  1958. return rejects;
  1959. }
  1960. static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
  1961. const std::vector<std::vector<llama_grammar_element>> & rules,
  1962. const std::vector<std::vector<const llama_grammar_element *>> & stacks,
  1963. const std::vector<llama_grammar_candidate> & candidates) {
  1964. LLAMA_ASSERT(!stacks.empty()); // REVIEW
  1965. if (candidates.empty()) {
  1966. return std::vector<llama_grammar_candidate>();
  1967. }
  1968. auto rejects = llama_grammar_reject_candidates_for_stack(rules, stacks.front(), candidates);
  1969. for (size_t i = 1, size = stacks.size(); i < size; ++i) {
  1970. rejects = llama_grammar_reject_candidates_for_stack(rules, stacks[i], rejects);
  1971. }
  1972. return rejects;
  1973. }
  1974. //
  1975. // grammar - external
  1976. //
  1977. struct llama_grammar * llama_grammar_init(
  1978. const llama_grammar_element ** rules,
  1979. size_t n_rules,
  1980. size_t start_rule_index) {
  1981. const llama_grammar_element * pos;
  1982. // copy rule definitions into vectors
  1983. std::vector<std::vector<llama_grammar_element>> vec_rules(n_rules);
  1984. for (size_t i = 0; i < n_rules; i++) {
  1985. for (pos = rules[i]; pos->type != LLAMA_GRETYPE_END; pos++) {
  1986. vec_rules[i].push_back(*pos);
  1987. }
  1988. vec_rules[i].push_back({LLAMA_GRETYPE_END, 0});
  1989. }
  1990. // loop over alternates of start rule to build initial stacks
  1991. std::vector<std::vector<const llama_grammar_element *>> stacks;
  1992. pos = rules[start_rule_index];
  1993. do {
  1994. std::vector<const llama_grammar_element *> stack;
  1995. if (!llama_grammar_is_end_of_sequence(pos)) {
  1996. // if alternate is nonempty, add to stack
  1997. stack.push_back(pos);
  1998. }
  1999. llama_grammar_advance_stack(vec_rules, stack, stacks);
  2000. while (!llama_grammar_is_end_of_sequence(pos)) {
  2001. // scan to end of alternate def
  2002. pos++;
  2003. }
  2004. if (pos->type == LLAMA_GRETYPE_ALT) {
  2005. // there's another alternate def of this rule to process
  2006. pos++;
  2007. } else {
  2008. break;
  2009. }
  2010. } while (true);
  2011. return new llama_grammar{ std::move(vec_rules), std::move(stacks) };
  2012. }
  2013. void llama_grammar_free(struct llama_grammar * grammar) {
  2014. delete grammar;
  2015. }
  2016. //
  2017. // sampling
  2018. //
  2019. void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) {
  2020. assert(candidates->size > 0);
  2021. const int64_t t_start_sample_us = ggml_time_us();
  2022. // Sort the logits in descending order
  2023. if (!candidates->sorted) {
  2024. std::sort(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
  2025. return a.logit > b.logit;
  2026. });
  2027. candidates->sorted = true;
  2028. }
  2029. float max_l = candidates->data[0].logit;
  2030. float cum_sum = 0.0f;
  2031. for (size_t i = 0; i < candidates->size; ++i) {
  2032. float p = expf(candidates->data[i].logit - max_l);
  2033. candidates->data[i].p = p;
  2034. cum_sum += p;
  2035. }
  2036. for (size_t i = 0; i < candidates->size; ++i) {
  2037. candidates->data[i].p /= cum_sum;
  2038. }
  2039. if (ctx) {
  2040. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2041. }
  2042. }
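
// numeric example: logits {2, 1, 0} -> after subtracting max_l: exp {1.000, 0.368, 0.135},
// cum_sum = 1.503 -> p {0.665, 0.245, 0.090}; subtracting max_l first keeps expf() from
// overflowing for large logits without changing the result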

void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep) {
    const int64_t t_start_sample_us = ggml_time_us();

    k = std::max(k, (int) min_keep);
    k = std::min(k, (int) candidates->size);

    // Sort scores in descending order
    if (!candidates->sorted) {
        auto comp = [](const llama_token_data & a, const llama_token_data & b) {
            return a.logit > b.logit;
        };
        if (k == (int) candidates->size) {
            std::sort(candidates->data, candidates->data + candidates->size, comp);
        } else {
            std::partial_sort(candidates->data, candidates->data + k, candidates->data + candidates->size, comp);
        }
        candidates->sorted = true;
    }
    candidates->size = k;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}

void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
    if (p >= 1.0f) {
        return;
    }

    llama_sample_softmax(ctx, candidates);

    const int64_t t_start_sample_us = ggml_time_us();

    // Compute the cumulative probabilities
    float cum_sum = 0.0f;
    size_t last_idx = candidates->size;

    for (size_t i = 0; i < candidates->size; ++i) {
        cum_sum += candidates->data[i].p;

        // Check if the running sum is at least p or if we have kept at least min_keep tokens
        // we set the last index to i+1 to indicate that the current element should be included in the set
        if (cum_sum >= p && i + 1 >= min_keep) {
            last_idx = i + 1;
            break;
        }
    }

    // Resize the output vector to keep only the top-p tokens
    candidates->size = last_idx;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}
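
// numeric example with p = 0.8 and sorted probs {0.50, 0.30, 0.15, 0.05}:
// cum_sum reaches 0.80 at i = 1, so last_idx = 2 and only the top two tokens survive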

void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep) {
    if (z >= 1.0f || candidates->size <= 2) {
        return;
    }

    llama_sample_softmax(nullptr, candidates);
    const int64_t t_start_sample_us = ggml_time_us();

    // Compute the first and second derivatives
    std::vector<float> first_derivatives(candidates->size - 1);
    std::vector<float> second_derivatives(candidates->size - 2);

    for (size_t i = 0; i < first_derivatives.size(); ++i) {
        first_derivatives[i] = candidates->data[i].p - candidates->data[i + 1].p;
    }
    for (size_t i = 0; i < second_derivatives.size(); ++i) {
        second_derivatives[i] = first_derivatives[i] - first_derivatives[i + 1];
    }

    // Calculate absolute value of second derivatives
    for (size_t i = 0; i < second_derivatives.size(); ++i) {
        second_derivatives[i] = fabsf(second_derivatives[i]);
    }

    // Normalize the second derivatives
    {
        const float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);

        if (second_derivatives_sum > 1e-6f) {
            for (float & value : second_derivatives) {
                value /= second_derivatives_sum;
            }
        } else {
            for (float & value : second_derivatives) {
                value = 1.0f / second_derivatives.size();
            }
        }
    }

    float cum_sum = 0.0f;
    size_t last_idx = candidates->size;
    for (size_t i = 0; i < second_derivatives.size(); ++i) {
        cum_sum += second_derivatives[i];

        // Check if the running sum is greater than z or if we have kept at least min_keep tokens
        if (cum_sum > z && i >= min_keep) {
            last_idx = i;
            break;
        }
    }

    // Resize the output vector to keep only the tokens above the tail location
    candidates->size = last_idx;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}

void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
    // Reference implementation:
    // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr
    if (p >= 1.0f) {
        return;
    }

    // Compute the softmax of logits and calculate entropy
    llama_sample_softmax(nullptr, candidates);

    const int64_t t_start_sample_us = ggml_time_us();

    float entropy = 0.0f;
    for (size_t i = 0; i < candidates->size; ++i) {
        entropy += -candidates->data[i].p * logf(candidates->data[i].p);
    }

    // Compute the absolute difference between negative log probability and entropy for each candidate
    std::vector<float> shifted_scores;
    for (size_t i = 0; i < candidates->size; ++i) {
        float shifted_score = fabsf(-logf(candidates->data[i].p) - entropy);
        shifted_scores.push_back(shifted_score);
    }

    // Sort tokens based on the shifted_scores and their corresponding indices
    std::vector<size_t> indices(candidates->size);
    std::iota(indices.begin(), indices.end(), 0);

    std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) {
        return shifted_scores[a] < shifted_scores[b];
    });

    // Compute the cumulative probabilities
    float cum_sum = 0.0f;
    size_t last_idx = indices.size();

    for (size_t i = 0; i < indices.size(); ++i) {
        size_t idx = indices[i];
        cum_sum += candidates->data[idx].p;

        // Check if the running sum is greater than typical or if we have kept at least min_keep tokens
        if (cum_sum > p && i >= min_keep - 1) {
            last_idx = i + 1;
            break;
        }
    }

    // Resize the output vector to keep only the locally typical tokens
    std::vector<llama_token_data> new_candidates;
    for (size_t i = 0; i < last_idx; ++i) {
        size_t idx = indices[i];
        new_candidates.push_back(candidates->data[idx]);
    }

    // Replace the data in candidates with the new_candidates data
    std::copy(new_candidates.begin(), new_candidates.end(), candidates->data);
    candidates->size = new_candidates.size();

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}

void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
    const int64_t t_start_sample_us = ggml_time_us();

    for (size_t i = 0; i < candidates_p->size; ++i) {
        candidates_p->data[i].logit /= temp;
    }

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}

void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty) {
    if (last_tokens_size == 0 || penalty == 1.0f) {
        return;
    }

    const int64_t t_start_sample_us = ggml_time_us();

    for (size_t i = 0; i < candidates->size; ++i) {
        const auto * token_iter = std::find(last_tokens, last_tokens + last_tokens_size, candidates->data[i].id);
        if (token_iter == last_tokens + last_tokens_size) {
            continue;
        }

        // The academic publication that described this technique only divided by the penalty, but that
        // would make tokens with negative logits more likely, which is obviously wrong.
        // The common fix, applied here, is to multiply by the penalty instead of dividing when the logit is negative.
        if (candidates->data[i].logit <= 0) {
            candidates->data[i].logit *= penalty;
        } else {
            candidates->data[i].logit /= penalty;
        }
    }

    candidates->sorted = false;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}

void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens_p, size_t last_tokens_size, float alpha_frequency, float alpha_presence) {
    if (last_tokens_size == 0 || (alpha_frequency == 0.0f && alpha_presence == 0.0f)) {
        return;
    }

    const int64_t t_start_sample_us = ggml_time_us();

    // Create a frequency map to count occurrences of each token in last_tokens
    std::unordered_map<llama_token, int> token_count;
    for (size_t i = 0; i < last_tokens_size; ++i) {
        token_count[last_tokens_p[i]]++;
    }

    // Apply frequency and presence penalties to the candidates
    for (size_t i = 0; i < candidates->size; ++i) {
        auto token_iter = token_count.find(candidates->data[i].id);
        if (token_iter == token_count.end()) {
            continue;
        }

        int count = token_iter->second;
        candidates->data[i].logit -= float(count) * alpha_frequency + float(count > 0) * alpha_presence;
    }

    candidates->sorted = false;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}

void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar) {
    assert(ctx);
    const int64_t t_start_sample_us = ggml_time_us();

    bool allow_eos = false;
    for (const auto & stack : grammar->stacks) {
        if (stack.empty()) {
            allow_eos = true;
            break;
        }
    }

    const llama_token eos = llama_token_eos();

    std::vector<std::vector<uint32_t>>   candidates_decoded;
    std::vector<llama_grammar_candidate> candidates_grammar;

    for (size_t i = 0; i < candidates->size; ++i) {
        const llama_token id  = candidates->data[i].id;
        const char *      str = llama_token_to_str(ctx, id);
        if (id == eos) {
            if (!allow_eos) {
                candidates->data[i].logit = -INFINITY;
            }
        } else if (*str == 0) {
            candidates->data[i].logit = -INFINITY;
        } else {
            candidates_decoded.push_back(decode_utf8(str));
            candidates_grammar.push_back({ i, candidates_decoded.back().data() });
        }
    }

    const auto rejects =
        llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar);
    for (auto & reject : rejects) {
        candidates->data[reject.index].logit = -INFINITY;
    }

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
}

static void llama_log_softmax(float * array, size_t size) {
    float max_l = *std::max_element(array, array + size);
    float sum = 0.f;
    for (size_t i = 0; i < size; ++i) {
        float p = expf(array[i] - max_l);
        sum += p;
        array[i] = p;
    }

    for (size_t i = 0; i < size; ++i) {
        array[i] = logf(array[i] / sum);
    }
}

void llama_sample_classifier_free_guidance(
        struct llama_context * ctx,
        llama_token_data_array * candidates,
        struct llama_context * guidance_ctx,
        float scale) {
    int64_t t_start_sample_us = ggml_time_us();

    assert(ctx);
    auto n_vocab = llama_n_vocab(ctx);
    assert(n_vocab == (int)candidates->size);
    assert(!candidates->sorted);

    std::vector<float> logits_base;
    logits_base.reserve(candidates->size);
    for (size_t i = 0; i < candidates->size; ++i) {
        logits_base.push_back(candidates->data[i].logit);
    }
    llama_log_softmax(logits_base.data(), candidates->size);

    float * logits_guidance = llama_get_logits(guidance_ctx);
    llama_log_softmax(logits_guidance, n_vocab);

    for (int i = 0; i < n_vocab; ++i) {
        float logit_guidance = logits_guidance[i];
        float logit_base     = logits_base[i];
        candidates->data[i].logit = scale * (logit_base - logit_guidance) + logit_guidance;
    }

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}

llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu) {
    assert(ctx);
    auto N = float(llama_n_vocab(ctx));
    int64_t t_start_sample_us;
    t_start_sample_us = ggml_time_us();

    llama_sample_softmax(nullptr, candidates);

    // Estimate s_hat using the most probable m tokens
    float s_hat = 0.0;
    float sum_ti_bi = 0.0;
    float sum_ti_sq = 0.0;
    for (size_t i = 0; i < size_t(m - 1) && i < candidates->size - 1; ++i) {
        float t_i = logf(float(i + 2) / float(i + 1));
        float b_i = logf(candidates->data[i].p / candidates->data[i + 1].p);
        sum_ti_bi += t_i * b_i;
        sum_ti_sq += t_i * t_i;
    }
    s_hat = sum_ti_bi / sum_ti_sq;

    // Compute k from the estimated s_hat and target surprise value
    float epsilon_hat = s_hat - 1;
    float k = powf((epsilon_hat * powf(2, *mu)) / (1 - powf(N, -epsilon_hat)), 1 / s_hat);
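    // per the mirostat paper (arXiv:2007.14966): assuming the top tokens follow a Zipf
    // distribution p_i ~ i^(-s_hat), the loop above is a least-squares fit of s_hat from
    // the log-ratios b_i = log(p_i/p_{i+1}) against t_i = log((i+2)/(i+1)); k is then
    // chosen so that sampling from the top k yields surprise close to the current target mu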
  2335. // Sample the next word X using top-k sampling
  2336. llama_sample_top_k(nullptr, candidates, int(k), 1);
  2337. if (ctx) {
  2338. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2339. }
  2340. llama_token X = llama_sample_token(ctx, candidates);
  2341. t_start_sample_us = ggml_time_us();
  2342. // Compute error as the difference between observed surprise and target surprise value
  2343. size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
  2344. return candidate.id == X;
  2345. }));
  2346. float observed_surprise = -log2f(candidates->data[X_idx].p);
  2347. float e = observed_surprise - tau;
  2348. // Update mu using the learning rate and error
  2349. *mu = *mu - eta * e;
  2350. if (ctx) {
  2351. ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
  2352. }
  2353. return X;
  2354. }

llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu) {
    int64_t t_start_sample_us;
    t_start_sample_us = ggml_time_us();

    llama_sample_softmax(ctx, candidates);

    // Truncate the words with surprise values greater than mu
    candidates->size = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
        return -log2f(candidate.p) > *mu;
    }));

    if (candidates->size == 0) {
        candidates->size = 1;
    }

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }

    // Normalize the probabilities of the remaining words
    llama_sample_softmax(ctx, candidates);

    // Sample the next word X from the remaining words
    llama_token X = llama_sample_token(ctx, candidates);
    t_start_sample_us = ggml_time_us();

    // Compute error as the difference between observed surprise and target surprise value
    size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const llama_token_data & candidate) {
        return candidate.id == X;
    }));
    float observed_surprise = -log2f(candidates->data[X_idx].p);
    float e = observed_surprise - tau;

    // Update mu using the learning rate and error
    *mu = *mu - eta * e;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
    return X;
}
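
// Illustrative usage sketch (not part of the library): same contract as the
// v1 helper above, but without the m parameter; names and values are
// hypothetical.
llama_token example_sample_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates) {
    const float tau = 5.0f;
    const float eta = 0.1f;
    static float mu = 2.0f * tau; // persistent across calls; 2*tau is a common start
    return llama_sample_token_mirostat_v2(ctx, candidates, tau, eta, &mu);
}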

llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates) {
    const int64_t t_start_sample_us = ggml_time_us();

    // Find max element
    auto * max_iter = std::max_element(candidates->data, candidates->data + candidates->size, [](const llama_token_data & a, const llama_token_data & b) {
        return a.logit < b.logit;
    });

    llama_token result = max_iter->id;
    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
        ctx->n_sample++;
    }
    return result;
}

llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) {
    assert(ctx);
    const int64_t t_start_sample_us = ggml_time_us();
    llama_sample_softmax(nullptr, candidates);

    std::vector<float> probs;
    probs.reserve(candidates->size);
    for (size_t i = 0; i < candidates->size; ++i) {
        probs.push_back(candidates->data[i].p);
    }

    std::discrete_distribution<> dist(probs.begin(), probs.end());
    auto & rng = ctx->rng;
    int idx = dist(rng);

    llama_token result = candidates->data[idx].id;

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    ctx->n_sample++;
    return result;
}
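
// Illustrative sketch (not part of the library): how a caller typically builds
// the candidates array from the logits of the last evaluated token before
// invoking one of the samplers above. The helper name is hypothetical.
llama_token example_sample_from_logits(struct llama_context * ctx) {
    const int n_vocab = llama_n_vocab(ctx);
    float * logits = llama_get_logits(ctx);

    std::vector<llama_token_data> candidates;
    candidates.reserve(n_vocab);
    for (llama_token id = 0; id < n_vocab; ++id) {
        candidates.push_back(llama_token_data{ id, logits[id], 0.0f });
    }
    llama_token_data_array arr = { candidates.data(), candidates.size(), /*sorted=*/ false };

    return llama_sample_token(ctx, &arr); // or llama_sample_token_greedy, mirostat, ...
}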

void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) {
    const int64_t t_start_sample_us = ggml_time_us();

    if (token == llama_token_eos()) {
        for (const auto & stack : grammar->stacks) {
            if (stack.empty()) {
                return;
            }
        }
        LLAMA_ASSERT(false);
    }

    const char * str = llama_token_to_str(ctx, token);
    // Note terminating 0 in decoded string
    auto code_points = decode_utf8(str);
    for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
        grammar->stacks = llama_grammar_accept(grammar->rules, grammar->stacks, *it);
    }
    LLAMA_ASSERT(!grammar->stacks.empty());

    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
}

//
// quantization
//

static void llama_convert_tensor_internal(const llama_load_tensor & tensor, llama_buffer & output, const int nelements, const int nthread) {
    if (output.size < nelements * sizeof(float)) {
        output.resize(nelements * sizeof(float));
    }
    float * f32_output = (float *) output.addr;

    ggml_type_traits_t qtype;
    if (ggml_is_quantized(tensor.type)) {
        qtype = ggml_internal_get_type_traits(tensor.type);
        if (qtype.to_float == NULL) {
            throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor.type)));
        }
    } else if (tensor.type != GGML_TYPE_F16) {
        throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor.type)));
    }

    if (nthread < 2) {
        if (tensor.type == GGML_TYPE_F16) {
            ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor.data, f32_output, nelements);
        } else if (ggml_is_quantized(tensor.type)) {
            qtype.to_float(tensor.data, f32_output, nelements);
        } else {
            LLAMA_ASSERT(false); // unreachable
        }
        return;
    }

    auto block_size = tensor.type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor.type);
    auto block_size_bytes = ggml_type_size(tensor.type);

    LLAMA_ASSERT(nelements % block_size == 0);
    auto nblocks = nelements / block_size;
    auto blocks_per_thread = nblocks / nthread;
    auto spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count

    std::vector<std::thread> workers;
    for (auto tnum = 0, in_buff_offs = 0, out_buff_offs = 0; tnum < nthread; tnum++) {
        auto thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
        auto thr_elems = thr_blocks * block_size; // number of elements for this thread
        auto thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread

        auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
            if (typ == GGML_TYPE_F16) {
                ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
            } else {
                qtype.to_float(inbuf, outbuf, nels);
            }
        };
        workers.push_back(std::thread(compute, tensor.type, tensor.data + in_buff_offs, f32_output + out_buff_offs, thr_elems));
        in_buff_offs += thr_block_bytes;
        out_buff_offs += thr_elems;
    }
    for (auto & worker : workers) {
        worker.join();
    }
}
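
// Worked example of the partitioning above (illustrative numbers): dequantizing
// nelements = 4096 values of a type with block_size = 32 gives nblocks = 128;
// with nthread = 3, blocks_per_thread = 42 and spare_blocks = 128 - 3*42 = 2,
// so threads 0 and 1 convert 42 blocks each and the last thread takes 44.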

static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
    ggml_type quantized_type;
    llama_ftype ftype = params->ftype;
    int nthread = params->nthread;

    switch (params->ftype) {
        case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break;
        case LLAMA_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break;
        case LLAMA_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break;
        case LLAMA_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break;
        case LLAMA_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break;
        case LLAMA_FTYPE_MOSTLY_F16:  quantized_type = GGML_TYPE_F16;  break;
        case LLAMA_FTYPE_ALL_F32:     quantized_type = GGML_TYPE_F32;  break;

#ifdef GGML_USE_K_QUANTS
        // K-quants
        case LLAMA_FTYPE_MOSTLY_Q2_K:   quantized_type = GGML_TYPE_Q2_K; break;
        case LLAMA_FTYPE_MOSTLY_Q3_K_S:
        case LLAMA_FTYPE_MOSTLY_Q3_K_M:
        case LLAMA_FTYPE_MOSTLY_Q3_K_L: quantized_type = GGML_TYPE_Q3_K; break;
        case LLAMA_FTYPE_MOSTLY_Q4_K_S:
        case LLAMA_FTYPE_MOSTLY_Q4_K_M: quantized_type = GGML_TYPE_Q4_K; break;
        case LLAMA_FTYPE_MOSTLY_Q5_K_S:
        case LLAMA_FTYPE_MOSTLY_Q5_K_M: quantized_type = GGML_TYPE_Q5_K; break;
        case LLAMA_FTYPE_MOSTLY_Q6_K:   quantized_type = GGML_TYPE_Q6_K; break;
#endif
        default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
    }

    if (nthread <= 0) {
        nthread = std::thread::hardware_concurrency();
    }

    std::unique_ptr<llama_model_loader> model_loader(new llama_model_loader(fname_inp, /*use_mmap*/ false));
    llama_file_saver file_saver(fname_out.c_str(), model_loader->file_loader.get(), params->ftype);

#ifdef GGML_USE_K_QUANTS
    int n_attention_wv    = 0;
    int n_feed_forward_w2 = 0;
    for (auto & tensor : model_loader->tensors_map.tensors) {
        if (tensor.name.find("attention.wv.weight") != std::string::npos) {
            ++n_attention_wv;
        }
        else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) {
            ++n_feed_forward_w2;
        }
    }

    int i_attention_wv    = 0;
    int i_feed_forward_w2 = 0;
#endif

    size_t total_size_org = 0;
    size_t total_size_new = 0;
    std::vector<int64_t> hist_all(1 << 4, 0);

    std::vector<std::thread> workers;
    std::mutex mutex;

    auto use_more_bits = [] (int i_layer, int num_layers) -> bool {
        return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2;
    };

    size_t idx = 0;
    for (llama_load_tensor & tensor : model_loader->tensors_map.tensors) {
        llama_buffer read_data;
        read_data.resize(tensor.size);
        tensor.data = read_data.addr;
        model_loader->load_data_for(tensor);

        LLAMA_LOG_INFO("[%4zu/%4zu] %36s - %16s, type = %6s, ",
               ++idx, model_loader->tensors_map.tensors.size(),
               tensor.name.c_str(), llama_format_tensor_shape(tensor.ne).c_str(),
               ggml_type_name(tensor.type));

        // This used to be a regex, but <regex> has an extreme cost to compile times.
        bool quantize = tensor.name.rfind("weight") == tensor.name.size() - 6; // ends with 'weight'?

        // quantize only 2D tensors
        quantize &= (tensor.ne.size() == 2);
        quantize &= params->quantize_output_tensor || tensor.name != "output.weight";
        quantize &= quantized_type != tensor.type;

        enum ggml_type new_type;
        void * new_data;
        size_t new_size;
        llama_buffer work;

        if (!quantize) {
            new_type = tensor.type;
            new_data = tensor.data;
            new_size = tensor.size;
            LLAMA_LOG_INFO("size = %8.3f MB\n", tensor.size/1024.0/1024.0);
        } else {
            new_type = quantized_type;
#ifdef GGML_USE_K_QUANTS
            if (tensor.name == "output.weight") {
                int nx = tensor.ne.at(0);
                int ny = tensor.ne.at(1);
                if (nx % QK_K == 0 && ny % QK_K == 0) {
                    new_type = GGML_TYPE_Q6_K;
                }
            } else if (tensor.name.find("attention.wv.weight") != std::string::npos) {
                if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
                else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
                        use_more_bits(i_attention_wv, n_attention_wv)) new_type = GGML_TYPE_Q6_K;
                else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) &&
                        (i_attention_wv < n_attention_wv/8 || i_attention_wv >= 7*n_attention_wv/8)) new_type = GGML_TYPE_Q6_K;
                ++i_attention_wv;
            } else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) {
                if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
                else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
                        use_more_bits(i_feed_forward_w2, n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K;
                //else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && i_feed_forward_w2 < n_feed_forward_w2/8) new_type = GGML_TYPE_Q6_K;
                ++i_feed_forward_w2;
            } else if (tensor.name.find("attention.wo.weight") != std::string::npos) {
                if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
            }

            bool convert_incompatible_tensor = false;
            if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K ||
                new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K) {
                int nx = tensor.ne.at(0);
                int ny = tensor.ne.at(1);
                if (nx % QK_K != 0 || ny % QK_K != 0) {
                    LLAMA_LOG_INFO("\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n", nx, ny, QK_K);
                    convert_incompatible_tensor = true;
                }
            }
            if (convert_incompatible_tensor) {
                if (tensor.name == "output.weight") {
                    new_type = GGML_TYPE_F16; // fall back to F16 instead of just failing.
                    LLAMA_LOG_WARN("F16 will be used for this tensor instead.\n");
                } else if (tensor.name == "tok_embeddings.weight") {
                    new_type = GGML_TYPE_Q4_0; // fall back to Q4_0 instead of just failing.
                    LLAMA_LOG_WARN("Q4_0 will be used for this tensor instead.\n");
                } else {
                    throw std::runtime_error("Unsupported tensor size encountered\n");
                }
            }
#endif

            float * f32_data;
            size_t nelements = tensor.ne.at(0) * tensor.ne.at(1);
            llama_buffer f32_conv_buf;

            if (tensor.type == GGML_TYPE_F32) {
                f32_data = (float *) tensor.data;
            } else if (ggml_is_quantized(tensor.type) && !params->allow_requantize) {
                throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor.type)));
            } else {
                llama_convert_tensor_internal(tensor, f32_conv_buf, nelements, nthread);
                f32_data = (float *) f32_conv_buf.addr;
            }

            LLAMA_LOG_INFO("quantizing to %s .. ", ggml_type_name(new_type));
            fflush(stdout);

            work.resize(nelements * 4); // upper bound on size
            new_data = work.addr;
            std::vector<int64_t> hist_cur(1 << 4, 0);

            int chunk_size = 32 * 512;
            const int nchunk = (nelements + chunk_size - 1)/chunk_size;
            const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
            if (nthread_use < 2) {
                new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nelements, hist_cur.data());
            } else {
                size_t counter = 0;
                new_size = 0;
                auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements, chunk_size] () {
                    std::vector<int64_t> local_hist;
                    size_t local_size = 0;
                    while (true) {
                        std::unique_lock<std::mutex> lock(mutex);
                        size_t first = counter; counter += chunk_size;
                        if (first >= nelements) {
                            if (!local_hist.empty()) {
                                for (int j = 0; j < int(local_hist.size()); ++j) {
                                    hist_cur[j] += local_hist[j];
                                }
                                new_size += local_size;
                            }
                            break;
                        }
                        lock.unlock();
                        size_t last = std::min(nelements, first + chunk_size);
                        if (local_hist.empty()) {
                            local_hist.resize(hist_cur.size(), 0);
                        }
                        local_size += ggml_quantize_chunk(new_type, f32_data, new_data, first, last - first, local_hist.data());
                    }
                };
                if ((int) workers.size() < nthread_use - 1) {
                    workers.resize(nthread_use - 1);
                }
                for (int it = 0; it < nthread_use - 1; ++it) {
                    workers[it] = std::thread(compute);
                }
                compute();
                for (int it = 0; it < nthread_use - 1; ++it) {
                    workers[it].join();
                }
            }

            LLAMA_LOG_INFO("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0);
            int64_t tot_count = 0;
            for (size_t i = 0; i < hist_cur.size(); i++) {
                hist_all[i] += hist_cur[i];
                tot_count += hist_cur[i];
            }

            if (tot_count > 0) {
                for (size_t i = 0; i < hist_cur.size(); i++) {
                    LLAMA_LOG_INFO("%5.3f ", hist_cur[i] / float(nelements));
                }
            }
            LLAMA_LOG_INFO("\n");
        }
        total_size_org += tensor.size;
        total_size_new += new_size;
        file_saver.write_tensor(tensor, new_type, new_data, new_size);
    }

    LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
    LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);

    {
        int64_t sum_all = 0;
        for (size_t i = 0; i < hist_all.size(); i++) {
            sum_all += hist_all[i];
        }

        if (sum_all > 0) {
            LLAMA_LOG_INFO("%s: hist: ", __func__);
            for (size_t i = 0; i < hist_all.size(); i++) {
                LLAMA_LOG_INFO("%5.3f ", hist_all[i] / float(sum_all));
            }
            LLAMA_LOG_INFO("\n");
        }
    }
}

//
// interface implementation
//

struct llama_model * llama_load_model_from_file(
        const char * path_model,
        struct llama_context_params params) {
    ggml_time_init();

    llama_model * model = new llama_model;

    ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;

    if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gqa, params.rms_norm_eps, params.n_gpu_layers,
                params.main_gpu, params.tensor_split, params.mul_mat_q, params.rope_freq_base, params.rope_freq_scale, params.low_vram,
                memory_type, params.use_mmap, params.use_mlock, params.vocab_only, params.progress_callback,
                params.progress_callback_user_data)) {
        LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
        delete model;
        return nullptr;
    }

    return model;
}

void llama_free_model(struct llama_model * model) {
    delete model;
}

struct llama_context * llama_new_context_with_model(
        struct llama_model * model,
        struct llama_context_params params) {
    if (!model) {
        return nullptr;
    }

    llama_context * ctx = new llama_context(*model);

    if (params.seed == LLAMA_DEFAULT_SEED) {
        params.seed = time(NULL);
    }

    unsigned cur_percentage = 0;
    if (params.progress_callback == NULL) {
        params.progress_callback_user_data = &cur_percentage;
        params.progress_callback = [](float progress, void * ctx) {
            unsigned * cur_percentage_p = (unsigned *) ctx;
            unsigned percentage = (unsigned) (100 * progress);
            while (percentage > *cur_percentage_p) {
                *cur_percentage_p = percentage;
                LLAMA_LOG_INFO(".");
                if (percentage >= 100) {
                    LLAMA_LOG_INFO("\n");
                }
            }
        };
    }

    ctx->rng = std::mt19937(params.seed);
    ctx->logits_all = params.logits_all;

    ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;

    // reserve memory for context buffers
    if (!params.vocab_only) {
        if (!kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) {
            LLAMA_LOG_ERROR("%s: kv_cache_init() failed for self-attention cache\n", __func__);
            llama_free(ctx);
            return nullptr;
        }

        {
            const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v);
            LLAMA_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
        }

        const auto & hparams = ctx->model.hparams;

        // resized during inference
        if (params.logits_all) {
            ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab);
        } else {
            ctx->logits.reserve(hparams.n_vocab);
        }

        if (params.embedding) {
            ctx->embedding.resize(hparams.n_embd);
        }

#ifdef LLAMA_USE_ALLOCATOR
        {
            static const size_t tensor_alignment = 32;
            // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data
            ctx->buf_compute.resize(ggml_tensor_overhead()*GGML_MAX_NODES + ggml_graph_overhead());

            // create measure allocator
            ctx->alloc = ggml_allocr_new_measure(tensor_alignment);

            // build worst-case graph
            int n_tokens = std::min((int)hparams.n_ctx, params.n_batch);
            int n_past = hparams.n_ctx - n_tokens;
            llama_token token = llama_token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
            ggml_cgraph * gf = llama_build_graph(*ctx, &token, NULL, n_tokens, n_past);

            // measure memory requirements for the graph
            size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment;

            LLAMA_LOG_INFO("%s: compute buffer total size = %7.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0);

            // debug - for comparison with scratch buffer
            //size_t prev_req =
            //    MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type) +
            //    MEM_REQ_SCRATCH1().at(ctx->model.type) +
            //    MEM_REQ_EVAL().at(ctx->model.type);
            //LLAMA_LOG_INFO("%s: (debug) equivalent with scratch buffer = %7.2f MB\n", __func__, prev_req / 1024.0 / 1024.0);

            // recreate allocator with exact memory requirements
            ggml_allocr_free(ctx->alloc);

            ctx->buf_alloc.resize(alloc_size);
            ctx->alloc = ggml_allocr_new(ctx->buf_alloc.addr, ctx->buf_alloc.size, tensor_alignment);
        }
#else
        ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type) + ggml_graph_overhead());
#endif

#ifdef LLAMA_USE_SCRATCH
        ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type));
        ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type));
#endif
    }

#ifdef GGML_USE_METAL
    if (params.n_gpu_layers > 0) {
        // this allocates all Metal resources and memory buffers
        ctx->ctx_metal = ggml_metal_init(1);

        if (!ctx->ctx_metal) {
            LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__);
            llama_free(ctx);
            return NULL;
        }

        void * data_ptr  = NULL;
        size_t data_size = 0;

        if (params.use_mmap) {
            data_ptr  = ctx->model.mapping->addr;
            data_size = ctx->model.mapping->size;
        } else {
            data_ptr  = ggml_get_mem_buffer(ctx->model.ctx);
            data_size = ggml_get_mem_size  (ctx->model.ctx);
        }

        const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx);

        LLAMA_LOG_INFO("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);

#define LLAMA_METAL_CHECK_BUF(result)                            \
    if (!(result)) {                                             \
        LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \
        llama_free(ctx);                                         \
        return NULL;                                             \
    }

        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size));
        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.addr, ctx->buf_compute.size, 0));
        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv",   ctx->kv_self.buf.addr, ctx->kv_self.buf.size, 0));
        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr0", ctx->buf_scratch[0].addr, ctx->buf_scratch[0].size, 0));
        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr1", ctx->buf_scratch[1].addr, ctx->buf_scratch[1].size, 0));
#undef LLAMA_METAL_CHECK_BUF
    }
#endif

#ifdef GGML_USE_MPI
    ctx->ctx_mpi = ggml_mpi_init();

    if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
        // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
        const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos());
        while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
        llama_backend_free();
        exit(1);
    }
#endif

    return ctx;
}

struct llama_context * llama_init_from_file(
        const char * path_model,
        struct llama_context_params params) {
    struct llama_model * model = llama_load_model_from_file(path_model, params);
    if (!model) {
        return nullptr;
    }

    struct llama_context * ctx = llama_new_context_with_model(model, params);
    if (!ctx) {
        // context creation failed: free the model instead of dereferencing a null pointer
        llama_free_model(model);
        return nullptr;
    }

    ctx->model_owner = true;
    return ctx;
}
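
// Illustrative sketch (not part of the library): the two-step load used by
// most applications; the model path is hypothetical, error handling minimal,
// and llama_context_default_params() is assumed from llama.h.
struct llama_context * example_init(void) {
    struct llama_context_params params = llama_context_default_params();

    struct llama_model * model = llama_load_model_from_file("models/7B/ggml-model-q4_0.bin", params);
    if (model == NULL) {
        return NULL;
    }

    struct llama_context * lctx = llama_new_context_with_model(model, params);
    if (lctx == NULL) {
        llama_free_model(model);
        return NULL;
    }

    // the caller owns both objects: llama_free(lctx), then llama_free_model(model)
    return lctx;
}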

void llama_free(struct llama_context * ctx) {
    delete ctx;
}

int llama_model_quantize(
        const char * fname_inp,
        const char * fname_out,
        const llama_model_quantize_params * params) {
    try {
        llama_model_quantize_internal(fname_inp, fname_out, params);
        return 0;
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
        return 1;
    }
}
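
// Illustrative usage sketch (not part of the library): quantizing an f16 model
// to Q4_0. Both file names are hypothetical, and the default-params helper
// llama_model_quantize_default_params() is assumed from llama.h.
int example_quantize(void) {
    llama_model_quantize_params qparams = llama_model_quantize_default_params();
    qparams.ftype   = LLAMA_FTYPE_MOSTLY_Q4_0;
    qparams.nthread = 4; // <= 0 would mean "use all hardware threads"
    return llama_model_quantize("models/7B/ggml-model-f16.bin", "models/7B/ggml-model-q4_0.bin", &qparams);
}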

int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) {
    LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);

    const int64_t t_start_lora_us = ggml_time_us();

    auto fin = std::ifstream(path_lora, std::ios::binary);
    if (!fin) {
        LLAMA_LOG_ERROR("%s: failed to open '%s'\n", __func__, path_lora);
        return 1;
    }

    // verify magic and version
    {
        uint32_t magic;
        fin.read((char *) &magic, sizeof(magic));
        if (magic != LLAMA_FILE_MAGIC_GGLA) {
            LLAMA_LOG_ERROR("%s: bad file magic\n", __func__);
            return 1;
        }
        uint32_t format_version;
        fin.read((char *) &format_version, sizeof(format_version));

        if (format_version != 1) {
            LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__);
            return 1;
        }
    }

    int32_t lora_r;
    int32_t lora_alpha;
    fin.read((char *) &lora_r, sizeof(lora_r));
    fin.read((char *) &lora_alpha, sizeof(lora_alpha));
    float scaling = (float)lora_alpha / (float)lora_r;

    LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);

    // create a temporary ggml context to store the lora tensors
    // todo: calculate size from biggest possible tensor
    std::vector<uint8_t> lora_buf(1024ull * 1024ull * 1024ull);
    struct ggml_init_params params;
    params.mem_size   = lora_buf.size();
    params.mem_buffer = lora_buf.data();
    params.no_alloc   = false;

    ggml_context * lora_ctx = ggml_init(params);
    std::unordered_map<std::string, struct ggml_tensor *> lora_tensors;

    // create a name -> tensor map of the model to accelerate lookups
    std::unordered_map<std::string, struct ggml_tensor *> model_tensors;
    for (const auto & kv : model.tensors_by_name) {
        model_tensors.insert(kv);
    }

    // load base model
    std::unique_ptr<llama_model_loader> model_loader;
    ggml_context * base_ctx = NULL;
    llama_buffer base_buf;
    if (path_base_model) {
        LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
        model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true));

        size_t ctx_size;
        size_t mmapped_size;
        model_loader->calc_sizes(&ctx_size, &mmapped_size);
        base_buf.resize(ctx_size);

        ggml_init_params base_params;
        base_params.mem_size   = base_buf.size;
        base_params.mem_buffer = base_buf.addr;
        base_params.no_alloc   = model_loader->use_mmap;

        base_ctx = ggml_init(base_params);

        model_loader->ggml_ctx = base_ctx;
        // maybe this should be in llama_model_loader
        if (model_loader->use_mmap) {
            model_loader->mapping.reset(new llama_mmap(&model_loader->file_loader->file, /* prefetch */ 0, ggml_is_numa()));
        }
    }

    // read tensors and apply
    bool warned = false;
    int n_tensors = 0;

    std::vector<uint8_t> work_buffer;

    while (true) {
        int32_t n_dims;
        int32_t length;
        int32_t ftype;

        fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
        fin.read(reinterpret_cast<char *>(&length), sizeof(length));
        fin.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
        if (fin.eof()) {
            break;
        }

        int32_t ne[2] = { 1, 1 };
        for (int i = 0; i < n_dims; ++i) {
            fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
        }

        std::string name;
        {
            char buf[1024];
            fin.read(buf, length);
            name = std::string(buf, length);
        }

        // check for lora suffix and get the type of tensor
        const std::string lora_suffix = ".lora";
        size_t pos = name.rfind(lora_suffix);
        if (pos == std::string::npos) {
            LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
            return 1;
        }

        std::string lora_type = name.substr(pos + lora_suffix.length());
        std::string base_name = name;
        base_name.erase(pos);
        // LLAMA_LOG_INFO("%s: %s => %s (lora type %s) \n", __func__, name.c_str(), base_name.c_str(), lora_type.c_str());

        if (model_tensors.find(base_name) == model_tensors.end()) {
            LLAMA_LOG_ERROR("%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
            return 1;
        }

        // create ggml tensor
        ggml_type wtype;
        switch (ftype) {
            case 0: wtype = GGML_TYPE_F32; break;
            case 1: wtype = GGML_TYPE_F16; break;
            default:
                {
                    LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n",
                            __func__, ftype);
                    return 1; // this function returns an int error code, not a bool
                }
        }

        ggml_tensor * lora_tensor;
        if (n_dims == 2) {
            lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]);
        }
        else {
            LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
            return 1;
        }
        ggml_set_name(lora_tensor, "lora_tensor");

        // load tensor data
        size_t offset = fin.tellg();
        size_t tensor_data_size = ggml_nbytes(lora_tensor);
        offset = (offset + 31) & -32;
        fin.seekg(offset);
        fin.read((char *) lora_tensor->data, tensor_data_size);

        lora_tensors[name] = lora_tensor;

        // check if we have both A and B tensors and apply
        if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() &&
            lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {

            ggml_tensor * dest_t = model_tensors[base_name];

            offload_func_t offload_func = llama_nop;
            offload_func_t offload_func_force_inplace = llama_nop;

#ifdef GGML_USE_CUBLAS
            if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) {
                if (dest_t->type != GGML_TYPE_F16) {
                    throw std::runtime_error(format(
                        "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models", __func__));
                }
                offload_func = ggml_cuda_assign_buffers;
                offload_func_force_inplace = ggml_cuda_assign_buffers_force_inplace;
            }
#endif // GGML_USE_CUBLAS

            ggml_tensor * base_t;
            if (model_loader) {
                // load from base model
                if (model_loader->tensors_map.name_to_idx.find(base_name) == model_loader->tensors_map.name_to_idx.end()) {
                    LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
                    return 1;
                }
                size_t idx = model_loader->tensors_map.name_to_idx[base_name];
                llama_load_tensor & lt = model_loader->tensors_map.tensors[idx];
                base_t = model_loader->get_tensor(base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU);
                lt.data = (uint8_t *) lt.ggml_tensor->data;
                model_loader->load_data_for(lt);
                lt.ggml_tensor->data = lt.data;
            }
            else {
                base_t = dest_t;
            }

            if (ggml_is_quantized(base_t->type)) {
                if (!warned) {
                    LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
                                   "use a f16 or f32 base model with --lora-base\n", __func__);
                    warned = true;
                }
            }

            ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
            GGML_ASSERT(loraA->type == GGML_TYPE_F32);
            ggml_set_name(loraA, "loraA");

            ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
            GGML_ASSERT(loraB->type == GGML_TYPE_F32);
            ggml_set_name(loraB, "loraB");

            if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
                LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
                                " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
                return 1;
            }

            // w = w + BA*s
            ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB);
            offload_func(BA);
            ggml_set_name(BA, "BA");

            if (scaling != 1.0f) {
                ggml_tensor * scale_tensor = ggml_new_f32(lora_ctx, scaling);
                ggml_set_name(scale_tensor, "scale_tensor");

                BA = ggml_scale_inplace(lora_ctx, BA, scale_tensor);
                offload_func(BA);
                ggml_set_name(BA, "BA_scaled");
            }

            ggml_tensor * r;
            if (base_t == dest_t) {
                r = ggml_add_inplace(lora_ctx, dest_t, BA);
                offload_func_force_inplace(r);
                ggml_set_name(r, "r_add_inplace");
            }
            else {
                r = ggml_add(lora_ctx, base_t, BA);
                offload_func(r);
                ggml_set_name(r, "r_add");

                r = ggml_cpy(lora_ctx, r, dest_t);
                offload_func(r);
                ggml_set_name(r, "r_cpy");
            }

            struct ggml_cgraph gf = ggml_build_forward(r);

            ggml_graph_compute_helper(work_buffer, &gf, n_threads);

            // we won't need these tensors again, reset the context to save memory
            ggml_free(lora_ctx);
            lora_ctx = ggml_init(params);
            lora_tensors.clear();

            n_tensors++;
            if (n_tensors % 4 == 0) {
                LLAMA_LOG_INFO(".");
            }
        }
    }

    // TODO: this should be in a destructor, it will leak on failure
    ggml_free(lora_ctx);
    if (base_ctx) {
        ggml_free(base_ctx);
    }

    const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
    LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);

    return 0;
}

int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, const char * path_base_model, int n_threads) {
    try {
        return llama_apply_lora_from_file_internal(ctx->model, path_lora, path_base_model, n_threads);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
        return 1;
    }
}

int llama_model_apply_lora_from_file(const struct llama_model * model, const char * path_lora, const char * path_base_model, int n_threads) {
    try {
        return llama_apply_lora_from_file_internal(*model, path_lora, path_base_model, n_threads);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
        return 1;
    }
}
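
// Illustrative usage sketch (not part of the library): applying an adapter on
// top of an f16 base model, as the warning above recommends for quantized
// models; both paths are hypothetical.
int example_apply_lora(struct llama_model * model) {
    return llama_model_apply_lora_from_file(model,
            "lora/ggml-adapter-model.bin",  // path_lora
            "models/7B/ggml-model-f16.bin", // path_base_model (may be NULL)
            /*n_threads=*/ 4);
}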

int llama_get_kv_cache_token_count(const struct llama_context * ctx) {
    return ctx->kv_self.n;
}

#define LLAMA_MAX_RNG_STATE (64*1024)

void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed) {
    if (seed == LLAMA_DEFAULT_SEED) {
        seed = time(NULL);
    }
    ctx->rng.seed(seed);
}

// Returns the *maximum* size of the state
size_t llama_get_state_size(const struct llama_context * ctx) {
    // we don't know size of rng until we actually serialize it. so reserve more than enough memory for its serialized state.
    // for reference, std::mt19937(1337) serializes to 6701 bytes.
    const size_t s_rng_size        = sizeof(size_t);
    const size_t s_rng             = LLAMA_MAX_RNG_STATE;
    const size_t s_logits_capacity = sizeof(size_t);
    const size_t s_logits_size     = sizeof(size_t);
    const size_t s_logits          = ctx->logits.capacity() * sizeof(float);
    const size_t s_embedding_size  = sizeof(size_t);
    const size_t s_embedding       = ctx->embedding.size() * sizeof(float);
    const size_t s_kv_size         = sizeof(size_t);
    const size_t s_kv_ntok         = sizeof(int);
    const size_t s_kv              = ctx->kv_self.buf.size;

    const size_t s_total = (
        + s_rng_size
        + s_rng
        + s_logits_capacity
        + s_logits_size
        + s_logits
        + s_embedding_size
        + s_embedding
        + s_kv_size
        + s_kv_ntok
        + s_kv
    );

    return s_total;
}

/** copy state data into either a buffer or file depending on the passed in context
 *
 * file context:
 * llama_file file("/path", "wb");
 * llama_data_file_context data_ctx(&file);
 * llama_copy_state_data_internal(ctx, &data_ctx);
 *
 * buffer context:
 * std::vector<uint8_t> buf(max_size, 0);
 * llama_data_buffer_context data_ctx(buf.data());
 * llama_copy_state_data_internal(ctx, &data_ctx);
 *
 */
void llama_copy_state_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
    // copy rng
    {
        std::stringstream rng_ss;
        rng_ss << ctx->rng;

        const size_t rng_size = rng_ss.str().size();
        char rng_buf[LLAMA_MAX_RNG_STATE];

        memset(&rng_buf[0], 0, LLAMA_MAX_RNG_STATE);
        memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size());

        data_ctx->write(&rng_size,   sizeof(rng_size));
        data_ctx->write(&rng_buf[0], LLAMA_MAX_RNG_STATE);
    }

    // copy logits
    {
        const size_t logits_cap  = ctx->logits.capacity();
        const size_t logits_size = ctx->logits.size();

        data_ctx->write(&logits_cap,  sizeof(logits_cap));
        data_ctx->write(&logits_size, sizeof(logits_size));

        if (logits_size) {
            data_ctx->write(ctx->logits.data(), logits_size * sizeof(float));
        }

        // If there is a gap between the size and the capacity, write padding
        size_t padding_size = (logits_cap - logits_size) * sizeof(float);
        if (padding_size > 0) {
            std::vector<uint8_t> padding(padding_size, 0); // Create a buffer filled with zeros
            data_ctx->write(padding.data(), padding_size);
        }
    }

    // copy embeddings
    {
        const size_t embedding_size = ctx->embedding.size();

        data_ctx->write(&embedding_size, sizeof(embedding_size));

        if (embedding_size) {
            data_ctx->write(ctx->embedding.data(), embedding_size * sizeof(float));
        }
    }

    // copy kv cache
    {
        const auto & kv_self = ctx->kv_self;
        const auto & hparams = ctx->model.hparams;
        const int    n_layer = hparams.n_layer;
        const int    n_embd  = hparams.n_embd_gqa();
        const int    n_ctx   = hparams.n_ctx;

        const size_t kv_size = kv_self.buf.size;
        const int    kv_ntok = llama_get_kv_cache_token_count(ctx);

        data_ctx->write(&kv_size, sizeof(kv_size));
        data_ctx->write(&kv_ntok, sizeof(kv_ntok));

        if (kv_size) {
            const size_t elt_size = ggml_element_size(kv_self.k);

            ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
            ggml_cgraph gf{};

            ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
            std::vector<uint8_t> kout3d_data(ggml_nbytes(kout3d), 0);
            kout3d->data = kout3d_data.data();

            ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
            std::vector<uint8_t> vout3d_data(ggml_nbytes(vout3d), 0);
            vout3d->data = vout3d_data.data();

            ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
                n_embd, kv_ntok, n_layer,
                elt_size*n_embd, elt_size*n_embd*n_ctx, 0);

            ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
                kv_ntok, n_embd, n_layer,
                elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);

            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, k3d, kout3d));
            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, v3d, vout3d));
            ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1);

            ggml_free(cpy_ctx);

            // our data is now in the kout3d_data and vout3d_data buffers
            // write them to file
            data_ctx->write(kout3d_data.data(), kout3d_data.size());
            data_ctx->write(vout3d_data.data(), vout3d_data.size());
        }
    }
}

size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
    llama_data_buffer_context data_ctx(dst);
    llama_copy_state_data_internal(ctx, &data_ctx);

    return data_ctx.get_size_written();
}
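
// Illustrative sketch (not part of the library): a full state round trip via a
// caller-owned buffer. llama_get_state_size() is an upper bound, so the buffer
// may be larger than what is actually written.
void example_state_roundtrip(struct llama_context * ctx) {
    std::vector<uint8_t> state(llama_get_state_size(ctx));
    const size_t n_written = llama_copy_state_data(ctx, state.data());
    (void) n_written; // always <= state.size()

    // ... evaluate more tokens, then rewind by restoring the snapshot:
    llama_set_state_data(ctx, state.data());
}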

// Sets the state reading from the specified source address
size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
    uint8_t * inp = src;

    // set rng
    {
        size_t rng_size;
        char   rng_buf[LLAMA_MAX_RNG_STATE];

        memcpy(&rng_size,   inp, sizeof(rng_size));    inp += sizeof(rng_size);
        memcpy(&rng_buf[0], inp, LLAMA_MAX_RNG_STATE); inp += LLAMA_MAX_RNG_STATE;

        std::stringstream rng_ss;
        rng_ss.str(std::string(&rng_buf[0], rng_size));
        rng_ss >> ctx->rng;

        LLAMA_ASSERT(rng_ss.fail() == false);
    }

    // set logits
    {
        size_t logits_cap;
        size_t logits_size;

        memcpy(&logits_cap,  inp, sizeof(logits_cap));  inp += sizeof(logits_cap);
        memcpy(&logits_size, inp, sizeof(logits_size)); inp += sizeof(logits_size);

        LLAMA_ASSERT(ctx->logits.capacity() == logits_cap);

        if (logits_size) {
            ctx->logits.resize(logits_size);
            memcpy(ctx->logits.data(), inp, logits_size * sizeof(float));
        }

        inp += logits_cap * sizeof(float);
    }

    // set embeddings
    {
        size_t embedding_size;

        memcpy(&embedding_size, inp, sizeof(embedding_size)); inp += sizeof(embedding_size);

        LLAMA_ASSERT(ctx->embedding.capacity() == embedding_size);

        if (embedding_size) {
            memcpy(ctx->embedding.data(), inp, embedding_size * sizeof(float));
            inp += embedding_size * sizeof(float);
        }
    }

    // set kv cache
    {
        const auto & kv_self = ctx->kv_self;
        const auto & hparams = ctx->model.hparams;
        const int    n_layer = hparams.n_layer;
        const int    n_embd  = hparams.n_embd_gqa();
        const int    n_ctx   = hparams.n_ctx;

        size_t kv_size;
        int kv_ntok;

        memcpy(&kv_size, inp, sizeof(kv_size)); inp += sizeof(kv_size);
        memcpy(&kv_ntok, inp, sizeof(kv_ntok)); inp += sizeof(kv_ntok);

        if (kv_size) {
            LLAMA_ASSERT(kv_self.buf.size == kv_size);

            const size_t elt_size = ggml_element_size(kv_self.k);

            ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
            ggml_cgraph gf{};

            ggml_tensor * kin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
            kin3d->data = (void *) inp;
            inp += ggml_nbytes(kin3d);

            ggml_tensor * vin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
            vin3d->data = (void *) inp;
            inp += ggml_nbytes(vin3d);

            ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
                n_embd, kv_ntok, n_layer,
                elt_size*n_embd, elt_size*n_embd*n_ctx, 0);

            ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
                kv_ntok, n_embd, n_layer,
                elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);

            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, kin3d, k3d));
            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, vin3d, v3d));
            ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1);

            ggml_free(cpy_ctx);
        }

        ctx->kv_self.n = kv_ntok;
    }

    const size_t nread    = inp - src;
    const size_t max_size = llama_get_state_size(ctx);

    LLAMA_ASSERT(nread <= max_size);

    return nread;
}

static bool llama_load_session_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    llama_file file(path_session, "rb");

    // sanity checks
    {
        const uint32_t magic   = file.read_u32();
        const uint32_t version = file.read_u32();

        if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
            LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
            return false;
        }

        llama_hparams session_hparams;
        file.read_raw(&session_hparams, sizeof(llama_hparams));

        if (session_hparams != ctx->model.hparams) {
            LLAMA_LOG_INFO("%s : model hparams didn't match from session file!\n", __func__);
            return false;
        }
    }

    // load the prompt
    {
        const uint32_t n_token_count = file.read_u32();

        if (n_token_count > n_token_capacity) {
            LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
            return false;
        }

        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
        *n_token_count_out = n_token_count;
    }

    // restore the context state
    {
        const size_t n_state_size_cur = file.size - file.tell();
        const size_t n_state_size_max = llama_get_state_size(ctx);

        if (n_state_size_cur > n_state_size_max) {
            LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
            return false;
        }

        std::vector<uint8_t> state_data(n_state_size_max);
        file.read_raw(state_data.data(), n_state_size_cur);

        llama_set_state_data(ctx, state_data.data());
    }

    return true;
}

bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
    try {
        return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("error loading session file: %s\n", err.what());
        return false;
    }
}

bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
    llama_file file(path_session, "wb");

    file.write_u32(LLAMA_SESSION_MAGIC);
    file.write_u32(LLAMA_SESSION_VERSION);

    file.write_raw(&ctx->model.hparams, sizeof(llama_hparams));

    // save the prompt
    file.write_u32((uint32_t) n_token_count);
    file.write_raw(tokens, sizeof(llama_token) * n_token_count);

    // save the context state using stream saving
    llama_data_file_context data_ctx(&file);
    llama_copy_state_data_internal(ctx, &data_ctx);

    return true;
}
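
// Illustrative usage sketch (not part of the library): persisting and restoring
// an evaluated prompt; the path is hypothetical.
bool example_session_roundtrip(struct llama_context * ctx, const std::vector<llama_token> & prompt) {
    if (!llama_save_session_file(ctx, "prompt.session", prompt.data(), prompt.size())) {
        return false;
    }

    std::vector<llama_token> tokens(llama_n_ctx(ctx));
    size_t n_loaded = 0;
    return llama_load_session_file(ctx, "prompt.session", tokens.data(), tokens.size(), &n_loaded);
}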

int llama_eval(
        struct llama_context * ctx,
        const llama_token * tokens,
        int n_tokens,
        int n_past,
        int n_threads) {
    if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) {
        LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
        return 1;
    }

    // get a more accurate load time, upon first eval
    // TODO: fix this
    if (!ctx->has_evaluated_once) {
        ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
        ctx->has_evaluated_once = true;
    }

    return 0;
}
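
// Illustrative sketch (not part of the library): a minimal greedy generation
// loop on top of llama_eval(); the helper name and parameters are hypothetical.
int example_generate(struct llama_context * ctx, const std::vector<llama_token> & prompt, int n_predict, int n_threads) {
    if (llama_eval(ctx, prompt.data(), (int) prompt.size(), /*n_past=*/ 0, n_threads) != 0) {
        return 1;
    }
    int n_past = (int) prompt.size();

    for (int i = 0; i < n_predict; ++i) {
        // pick the highest-logit token of the last evaluated position
        const float * logits = llama_get_logits(ctx);
        const int n_vocab = llama_n_vocab(ctx);
        const llama_token tok = (llama_token) std::distance(logits, std::max_element(logits, logits + n_vocab));
        if (tok == llama_token_eos()) {
            break;
        }
        // feed the sampled token back in to extend the context
        if (llama_eval(ctx, &tok, 1, n_past, n_threads) != 0) {
            return 1;
        }
        n_past += 1;
    }
    return 0;
}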

int llama_eval_embd(
        struct llama_context * ctx,
        const float * embd,
        int n_tokens,
        int n_past,
        int n_threads) {
    if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, nullptr)) {
        LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
        return 1;
    }

    // get a more accurate load time, upon first eval
    // TODO: fix this
    if (!ctx->has_evaluated_once) {
        ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
        ctx->has_evaluated_once = true;
    }

    return 0;
}

int llama_eval_export(struct llama_context * ctx, const char * fname) {
    const int n_batch = 1;
    const int n_ctx   = 512 - n_batch;

    const std::vector<llama_token> tmp(n_batch, llama_token_bos());

    if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) {
        LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
        return 1;
    }

    return 0;
}

int llama_tokenize_with_model(
        const struct llama_model * model,
        const char * text,
        llama_token * tokens,
        int n_max_tokens,
        bool add_bos) {
    auto res = llama_tokenize(model->vocab, text, add_bos);

    if (n_max_tokens < (int) res.size()) {
        LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
        return -((int) res.size());
    }

    for (size_t i = 0; i < res.size(); i++) {
        tokens[i] = res[i];
    }

    return res.size();
}

int llama_tokenize(
        struct llama_context * ctx,
        const char * text,
        llama_token * tokens,
        int n_max_tokens,
        bool add_bos) {
    return llama_tokenize_with_model(&ctx->model, text, tokens, n_max_tokens, add_bos);
}
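
// Illustrative usage sketch (not part of the library): the negative return
// value above reports how many tokens would have been needed, so a caller can
// grow its buffer and retry.
int example_tokenize(struct llama_context * ctx, const char * text, std::vector<llama_token> & out) {
    out.resize(64); // hypothetical first guess
    int n = llama_tokenize(ctx, text, out.data(), (int) out.size(), /*add_bos=*/ true);
    if (n < 0) {
        out.resize(-n);
        n = llama_tokenize(ctx, text, out.data(), (int) out.size(), /*add_bos=*/ true);
    }
    out.resize(std::max(n, 0));
    return n;
}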

int llama_n_vocab_from_model(const struct llama_model * model) {
    return model->vocab.id_to_token.size();
}

int llama_n_ctx_from_model(const struct llama_model * model) {
    return model->hparams.n_ctx;
}

int llama_n_embd_from_model(const struct llama_model * model) {
    return model->hparams.n_embd;
}

int llama_n_vocab(const struct llama_context * ctx) {
    return ctx->model.vocab.id_to_token.size();
}

int llama_n_ctx(const struct llama_context * ctx) {
    return ctx->model.hparams.n_ctx;
}

int llama_n_embd(const struct llama_context * ctx) {
    return ctx->model.hparams.n_embd;
}

int llama_get_vocab_from_model(
        const struct llama_model * model,
        const char * * strings,
        float * scores,
        int capacity) {
    int n = std::min(capacity, (int) model->vocab.id_to_token.size());
    for (int i = 0; i < n; ++i) {
        strings[i] = model->vocab.id_to_token[i].tok.c_str();
        scores[i]  = model->vocab.id_to_token[i].score;
    }
    return n;
}

int llama_get_vocab(
        const struct llama_context * ctx,
        const char * * strings,
        float * scores,
        int capacity) {
    return llama_get_vocab_from_model(&ctx->model, strings, scores, capacity);
}

float * llama_get_logits(struct llama_context * ctx) {
    return ctx->logits.data();
}

float * llama_get_embeddings(struct llama_context * ctx) {
    return ctx->embedding.data();
}

const char * llama_token_to_str_with_model(const struct llama_model * model, llama_token token) {
    if (token >= llama_n_vocab_from_model(model)) {
        return nullptr;
    }

    return model->vocab.id_to_token[token].tok.c_str();
}

const char * llama_token_to_str(const struct llama_context * ctx, llama_token token) {
    return llama_token_to_str_with_model(&ctx->model, token);
}

llama_token llama_token_bos() {
    return 1;
}

llama_token llama_token_eos() {
    return 2;
}

llama_token llama_token_nl() {
    return 13;
}

struct llama_timings llama_get_timings(struct llama_context * ctx) {
    struct llama_timings result = {
        /*.t_start_ms  =*/ 1e-3 * ctx->t_start_us,
        /*.t_end_ms    =*/ 1.00 * ggml_time_ms(),
        /*.t_load_ms   =*/ 1e-3 * ctx->t_load_us,
        /*.t_sample_ms =*/ 1e-3 * ctx->t_sample_us,
        /*.t_p_eval_ms =*/ 1e-3 * ctx->t_p_eval_us,
        /*.t_eval_ms   =*/ 1e-3 * ctx->t_eval_us,

        /*.n_sample =*/ std::max(1, ctx->n_sample),
        /*.n_p_eval =*/ std::max(1, ctx->n_p_eval),
        /*.n_eval   =*/ std::max(1, ctx->n_eval),
    };

    return result;
}

void llama_print_timings(struct llama_context * ctx) {
    const llama_timings timings = llama_get_timings(ctx);

    LLAMA_LOG_INFO("\n");
    LLAMA_LOG_INFO("%s: load time = %8.2f ms\n", __func__, timings.t_load_ms);
    LLAMA_LOG_INFO("%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
    LLAMA_LOG_INFO("%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
    LLAMA_LOG_INFO("%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
            __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
    LLAMA_LOG_INFO("%s: total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
}

void llama_reset_timings(struct llama_context * ctx) {
    ctx->t_start_us = ggml_time_us();
    ctx->t_sample_us = ctx->n_sample = 0;
    ctx->t_eval_us   = ctx->n_eval   = 0;
    ctx->t_p_eval_us = ctx->n_p_eval = 0;
}

const char * llama_print_system_info(void) {
    static std::string s;

    s  = "";
    s += "AVX = "         + std::to_string(ggml_cpu_has_avx())         + " | ";
    s += "AVX2 = "        + std::to_string(ggml_cpu_has_avx2())        + " | ";
    s += "AVX512 = "      + std::to_string(ggml_cpu_has_avx512())      + " | ";
    s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | ";
    s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | ";
    s += "FMA = "         + std::to_string(ggml_cpu_has_fma())         + " | ";
    s += "NEON = "        + std::to_string(ggml_cpu_has_neon())        + " | ";
    s += "ARM_FMA = "     + std::to_string(ggml_cpu_has_arm_fma())     + " | ";
    s += "F16C = "        + std::to_string(ggml_cpu_has_f16c())        + " | ";
    s += "FP16_VA = "     + std::to_string(ggml_cpu_has_fp16_va())     + " | ";
    s += "WASM_SIMD = "   + std::to_string(ggml_cpu_has_wasm_simd())   + " | ";
    s += "BLAS = "        + std::to_string(ggml_cpu_has_blas())        + " | ";
    s += "SSE3 = "        + std::to_string(ggml_cpu_has_sse3())        + " | ";
    s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";

    return s.c_str();
}

// For internal test use
const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(struct llama_context * ctx) {
    return ctx->model.tensors_by_name;
}

void llama_log_set(llama_log_callback log_callback, void * user_data) {
    g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
    g_state.log_callback_user_data = user_data;
}
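
// Illustrative usage sketch (not part of the library): redirecting the log
// output to stdout instead of the default stderr sink below; the callback
// name is hypothetical.
void example_logger(llama_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    fputs(text, stdout);
}
// ... somewhere during startup (hypothetical):
//     llama_log_set(example_logger, NULL);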

#if defined(_MSC_VER) && !defined(vsnprintf)
#define vsnprintf _vsnprintf
#endif

static void llama_log_internal_v(llama_log_level level, const char * format, va_list args) {
    va_list args_copy;
    va_copy(args_copy, args);
    char buffer[128];
    int len = vsnprintf(buffer, 128, format, args);
    if (len < 128) {
        g_state.log_callback(level, buffer, g_state.log_callback_user_data);
    } else {
        char * buffer2 = new char[len + 1];
        vsnprintf(buffer2, len + 1, format, args_copy);
        buffer2[len] = 0;
        g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
        delete[] buffer2;
    }
    va_end(args_copy);
}

static void llama_log_internal(llama_log_level level, const char * format, ...) {
    va_list args;
    va_start(args, format);
    llama_log_internal_v(level, format, args);
    va_end(args);
}

static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    fputs(text, stderr);
    fflush(stderr);
}