430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
2130221312213222133221342213522136221372213822139221402214122142221432214422145221462214722148221492215022151221522215322154221552215622157221582215922160221612216222163221642216522166221672216822169221702217122172221732217422175221762217722178221792218022181221822218322184221852218622187221882218922190221912219222193221942219522196221972219822199222002220122202222032220422205222062220722208222092221022211222122221322214222152221622217222182221922220222212222222223222242222522226222272222822229222302223122232222332223422235222362223722238222392224022241222422224322244222452224622247222482224922250222512225222253222542225522256222572225822259222602226122262222632226422265222662226722268222692227022271222722227322274222752227622277222782227922280222812228222283222842228522286222872228822289222902229122292222932229422295222962229722298222992230022301223022230322304223052230622307223082230922310223112231222313223142231522316223172231822319223202232122322223232232422325223262232722328223292233022331223322233322334223352233622337223382233922340223412234222343223442234522346223472234822349223502235122352223532235422355223562235722358223592236022361223622236322364223652236622367223682236922370223712237222373223742237522376223772237822379223802238122382223832238422385223862238722388223892239022391223922239322394223952239622397223982239922400224012240222403224042240522406224072240822409224102241122412224132241422415224162241722418224192242022421224222242322424224252242622427224282242922430224312243222433224342243522436224372243822439224402244122442224432244422445224462244722448224492245022451224522245322454224552245622457224582245922460224612246222463224642246522466224672246822469224702247122472224732247422475224762247722478224792248022481224822248322484224852248622487224882248922490 |
- /**
- * llama.cpp - commit 3f1ae2e32cde00c39b96be6d01c2997c29bae555 - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
- #include "llama-impl.h"
- #include "llama-vocab.h"
- #include "llama-sampling.h"
- #include "unicode.h"
- #include "ggml.h"
- #include "ggml-alloc.h"
- #include "ggml-backend.h"
- #ifdef GGML_USE_RPC
- # include "ggml-rpc.h"
- #endif
- #ifdef GGML_USE_CUDA
- # include "ggml-cuda.h"
- #elif defined(GGML_USE_VULKAN)
- # include "ggml-vulkan.h"
- #elif defined(GGML_USE_SYCL)
- # include "ggml-sycl.h"
- #elif defined(GGML_USE_KOMPUTE)
- # include "ggml-kompute.h"
- #elif defined(GGML_USE_CANN)
- # include "ggml-cann.h"
- #endif
- #ifdef GGML_USE_BLAS
- # include "ggml-blas.h"
- #endif
- #ifdef GGML_USE_METAL
- # include "ggml-metal.h"
- #endif
- // TODO: replace with ggml API call
- #define QK_K 256
- #ifdef __has_include
- #if __has_include(<unistd.h>)
- #include <unistd.h>
- #if defined(_POSIX_MAPPED_FILES)
- #include <sys/mman.h>
- #include <fcntl.h>
- #endif
- #if defined(_POSIX_MEMLOCK_RANGE)
- #include <sys/resource.h>
- #endif
- #endif
- #endif
- #if defined(_WIN32)
- #define WIN32_LEAN_AND_MEAN
- #ifndef NOMINMAX
- #define NOMINMAX
- #endif
- #include <windows.h>
- #ifndef PATH_MAX
- #define PATH_MAX MAX_PATH
- #endif
- #include <io.h>
- #endif
- #if __cplusplus >= 202000L
- #define LU8(x) (const char*)(u8##x)
- #else
- #define LU8(x) u8##x
- #endif
- #include <algorithm>
- #include <array>
- #include <cassert>
- #include <cctype>
- #include <cfloat>
- #include <cinttypes>
- #include <climits>
- #include <cmath>
- #include <cstdarg>
- #include <cstddef>
- #include <cstdint>
- #include <cstdio>
- #include <cstring>
- #include <ctime>
- #include <fstream>
- #include <functional>
- #include <future>
- #include <initializer_list>
- #include <locale>
- #include <map>
- #include <memory>
- #include <mutex>
- #include <numeric>
- #include <set>
- #include <sstream>
- #include <thread>
- #include <type_traits>
- #include <unordered_map>
- #if defined(_MSC_VER)
- #pragma warning(disable: 4244 4267) // possible loss of data
- #endif
- // bump if necessary
- #define LLAMA_MAX_LAYERS 512
- #define LLAMA_MAX_EXPERTS 160 // DeepSeekV2
- //
- // helpers
- //
- // trim whitespace from the beginning and end of a string
- static std::string trim(const std::string & str) {
-     size_t start = 0;
-     size_t end = str.size();
-     while (start < end && isspace(str[start])) {
-         start += 1;
-     }
-     while (end > start && isspace(str[end - 1])) {
-         end -= 1;
-     }
-     return str.substr(start, end - start);
- }
- static bool is_float_close(float a, float b, float abs_tol) {
-     // Check for non-negative tolerance
-     if (abs_tol < 0.0) {
-         throw std::invalid_argument("Tolerance must be non-negative");
-     }
-     // Exact equality check
-     if (a == b) {
-         return true;
-     }
-     // Check for infinities
-     if (std::isinf(a) || std::isinf(b)) {
-         return false;
-     }
-     // Regular comparison using the provided absolute tolerance
-     return std::fabs(b - a) <= abs_tol;
- }
- static void zeros(std::ofstream & file, size_t n) {
-     char zero = 0;
-     for (size_t i = 0; i < n; ++i) {
-         file.write(&zero, 1);
-     }
- }
- LLAMA_ATTRIBUTE_FORMAT(1, 2)
- static std::string format(const char * fmt, ...) {
-     va_list ap;
-     va_list ap2;
-     va_start(ap, fmt);
-     va_copy(ap2, ap);
-     int size = vsnprintf(NULL, 0, fmt, ap);
-     GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
-     std::vector<char> buf(size + 1);
-     int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
-     GGML_ASSERT(size2 == size);
-     va_end(ap2);
-     va_end(ap);
-     return std::string(buf.data(), size);
- }
- //
- // gguf constants (sync with gguf.py)
- //
- enum llm_arch {
- LLM_ARCH_LLAMA,
- LLM_ARCH_MLLAMA,
- LLM_ARCH_FALCON,
- LLM_ARCH_BAICHUAN,
- LLM_ARCH_GROK,
- LLM_ARCH_GPT2,
- LLM_ARCH_GPTJ,
- LLM_ARCH_GPTNEOX,
- LLM_ARCH_MPT,
- LLM_ARCH_STARCODER,
- LLM_ARCH_REFACT,
- LLM_ARCH_BERT,
- LLM_ARCH_NOMIC_BERT,
- LLM_ARCH_JINA_BERT_V2,
- LLM_ARCH_BLOOM,
- LLM_ARCH_STABLELM,
- LLM_ARCH_QWEN,
- LLM_ARCH_QWEN2,
- LLM_ARCH_QWEN2MOE,
- LLM_ARCH_PHI2,
- LLM_ARCH_PHI3,
- LLM_ARCH_PLAMO,
- LLM_ARCH_CODESHELL,
- LLM_ARCH_ORION,
- LLM_ARCH_INTERNLM2,
- LLM_ARCH_MINICPM,
- LLM_ARCH_MINICPM3,
- LLM_ARCH_GEMMA,
- LLM_ARCH_GEMMA2,
- LLM_ARCH_STARCODER2,
- LLM_ARCH_MAMBA,
- LLM_ARCH_XVERSE,
- LLM_ARCH_COMMAND_R,
- LLM_ARCH_DBRX,
- LLM_ARCH_OLMO,
- LLM_ARCH_OLMOE,
- LLM_ARCH_OPENELM,
- LLM_ARCH_ARCTIC,
- LLM_ARCH_DEEPSEEK2,
- LLM_ARCH_CHATGLM,
- LLM_ARCH_BITNET,
- LLM_ARCH_T5,
- LLM_ARCH_T5ENCODER,
- LLM_ARCH_JAIS,
- LLM_ARCH_NEMOTRON,
- LLM_ARCH_EXAONE,
- LLM_ARCH_RWKV6,
- LLM_ARCH_GRANITE,
- LLM_ARCH_GRANITE_MOE,
- LLM_ARCH_CHAMELEON,
- LLM_ARCH_SOLAR,
- LLM_ARCH_UNKNOWN,
- };
- static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
- { LLM_ARCH_LLAMA, "llama" },
- { LLM_ARCH_MLLAMA, "mllama" },
- { LLM_ARCH_FALCON, "falcon" },
- { LLM_ARCH_GROK, "grok" },
- { LLM_ARCH_GPT2, "gpt2" },
- { LLM_ARCH_GPTJ, "gptj" },
- { LLM_ARCH_GPTNEOX, "gptneox" },
- { LLM_ARCH_MPT, "mpt" },
- { LLM_ARCH_BAICHUAN, "baichuan" },
- { LLM_ARCH_STARCODER, "starcoder" },
- { LLM_ARCH_REFACT, "refact" },
- { LLM_ARCH_BERT, "bert" },
- { LLM_ARCH_NOMIC_BERT, "nomic-bert" },
- { LLM_ARCH_JINA_BERT_V2, "jina-bert-v2" },
- { LLM_ARCH_BLOOM, "bloom" },
- { LLM_ARCH_STABLELM, "stablelm" },
- { LLM_ARCH_QWEN, "qwen" },
- { LLM_ARCH_QWEN2, "qwen2" },
- { LLM_ARCH_QWEN2MOE, "qwen2moe" },
- { LLM_ARCH_PHI2, "phi2" },
- { LLM_ARCH_PHI3, "phi3" },
- { LLM_ARCH_PLAMO, "plamo" },
- { LLM_ARCH_CODESHELL, "codeshell" },
- { LLM_ARCH_ORION, "orion" },
- { LLM_ARCH_INTERNLM2, "internlm2" },
- { LLM_ARCH_MINICPM, "minicpm" },
- { LLM_ARCH_MINICPM3, "minicpm3" },
- { LLM_ARCH_GEMMA, "gemma" },
- { LLM_ARCH_GEMMA2, "gemma2" },
- { LLM_ARCH_STARCODER2, "starcoder2" },
- { LLM_ARCH_MAMBA, "mamba" },
- { LLM_ARCH_XVERSE, "xverse" },
- { LLM_ARCH_COMMAND_R, "command-r" },
- { LLM_ARCH_DBRX, "dbrx" },
- { LLM_ARCH_OLMO, "olmo" },
- { LLM_ARCH_OLMOE, "olmoe" },
- { LLM_ARCH_OPENELM, "openelm" },
- { LLM_ARCH_ARCTIC, "arctic" },
- { LLM_ARCH_DEEPSEEK2, "deepseek2" },
- { LLM_ARCH_CHATGLM, "chatglm" },
- { LLM_ARCH_BITNET, "bitnet" },
- { LLM_ARCH_T5, "t5" },
- { LLM_ARCH_T5ENCODER, "t5encoder" },
- { LLM_ARCH_JAIS, "jais" },
- { LLM_ARCH_NEMOTRON, "nemotron" },
- { LLM_ARCH_EXAONE, "exaone" },
- { LLM_ARCH_RWKV6, "rwkv6" },
- { LLM_ARCH_GRANITE, "granite" },
- { LLM_ARCH_GRANITE_MOE, "granitemoe" },
- { LLM_ARCH_CHAMELEON, "chameleon" },
- { LLM_ARCH_SOLAR, "solar" },
- { LLM_ARCH_UNKNOWN, "(unknown)" },
- };
- enum llm_kv {
- LLM_KV_GENERAL_TYPE,
- LLM_KV_GENERAL_ARCHITECTURE,
- LLM_KV_GENERAL_QUANTIZATION_VERSION,
- LLM_KV_GENERAL_ALIGNMENT,
- LLM_KV_GENERAL_NAME,
- LLM_KV_GENERAL_AUTHOR,
- LLM_KV_GENERAL_VERSION,
- LLM_KV_GENERAL_URL,
- LLM_KV_GENERAL_DESCRIPTION,
- LLM_KV_GENERAL_LICENSE,
- LLM_KV_GENERAL_SOURCE_URL,
- LLM_KV_GENERAL_SOURCE_HF_REPO,
- LLM_KV_VOCAB_SIZE,
- LLM_KV_CONTEXT_LENGTH,
- LLM_KV_EMBEDDING_LENGTH,
- LLM_KV_BLOCK_COUNT,
- LLM_KV_LEADING_DENSE_BLOCK_COUNT,
- LLM_KV_FEED_FORWARD_LENGTH,
- LLM_KV_EXPERT_FEED_FORWARD_LENGTH,
- LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH,
- LLM_KV_USE_PARALLEL_RESIDUAL,
- LLM_KV_TENSOR_DATA_LAYOUT,
- LLM_KV_EXPERT_COUNT,
- LLM_KV_EXPERT_USED_COUNT,
- LLM_KV_EXPERT_SHARED_COUNT,
- LLM_KV_EXPERT_WEIGHTS_SCALE,
- LLM_KV_POOLING_TYPE,
- LLM_KV_LOGIT_SCALE,
- LLM_KV_DECODER_START_TOKEN_ID,
- LLM_KV_ATTN_LOGIT_SOFTCAPPING,
- LLM_KV_FINAL_LOGIT_SOFTCAPPING,
- LLM_KV_SWIN_NORM,
- LLM_KV_RESCALE_EVERY_N_LAYERS,
- LLM_KV_TIME_MIX_EXTRA_DIM,
- LLM_KV_TIME_DECAY_EXTRA_DIM,
- LLM_KV_RESIDUAL_SCALE,
- LLM_KV_EMBEDDING_SCALE,
- LLM_KV_ATTENTION_HEAD_COUNT,
- LLM_KV_ATTENTION_HEAD_COUNT_KV,
- LLM_KV_ATTENTION_MAX_ALIBI_BIAS,
- LLM_KV_ATTENTION_CLAMP_KQV,
- LLM_KV_ATTENTION_KEY_LENGTH,
- LLM_KV_ATTENTION_VALUE_LENGTH,
- LLM_KV_ATTENTION_LAYERNORM_EPS,
- LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,
- LLM_KV_ATTENTION_CAUSAL,
- LLM_KV_ATTENTION_Q_LORA_RANK,
- LLM_KV_ATTENTION_KV_LORA_RANK,
- LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT,
- LLM_KV_ATTENTION_SLIDING_WINDOW,
- LLM_KV_ATTENTION_SCALE,
- LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION,
- LLM_KV_ATTENTION_CROSS_ATTENTION_LAYERS,
- LLM_KV_ROPE_DIMENSION_COUNT,
- LLM_KV_ROPE_FREQ_BASE,
- LLM_KV_ROPE_SCALE_LINEAR,
- LLM_KV_ROPE_SCALING_TYPE,
- LLM_KV_ROPE_SCALING_FACTOR,
- LLM_KV_ROPE_SCALING_ATTN_FACTOR,
- LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,
- LLM_KV_ROPE_SCALING_FINETUNED,
- LLM_KV_ROPE_SCALING_YARN_LOG_MUL,
- LLM_KV_SPLIT_NO,
- LLM_KV_SPLIT_COUNT,
- LLM_KV_SPLIT_TENSORS_COUNT,
- LLM_KV_SSM_INNER_SIZE,
- LLM_KV_SSM_CONV_KERNEL,
- LLM_KV_SSM_STATE_SIZE,
- LLM_KV_SSM_TIME_STEP_RANK,
- LLM_KV_SSM_DT_B_C_RMS,
- LLM_KV_WKV_HEAD_SIZE,
- LLM_KV_TOKENIZER_MODEL,
- LLM_KV_TOKENIZER_PRE,
- LLM_KV_TOKENIZER_LIST,
- LLM_KV_TOKENIZER_TOKEN_TYPE,
- LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,
- LLM_KV_TOKENIZER_SCORES,
- LLM_KV_TOKENIZER_MERGES,
- LLM_KV_TOKENIZER_BOS_ID,
- LLM_KV_TOKENIZER_EOS_ID,
- LLM_KV_TOKENIZER_UNK_ID,
- LLM_KV_TOKENIZER_SEP_ID,
- LLM_KV_TOKENIZER_PAD_ID,
- LLM_KV_TOKENIZER_CLS_ID,
- LLM_KV_TOKENIZER_MASK_ID,
- LLM_KV_TOKENIZER_ADD_BOS,
- LLM_KV_TOKENIZER_ADD_EOS,
- LLM_KV_TOKENIZER_ADD_PREFIX,
- LLM_KV_TOKENIZER_REMOVE_EXTRA_WS,
- LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP,
- LLM_KV_TOKENIZER_HF_JSON,
- LLM_KV_TOKENIZER_RWKV,
- LLM_KV_TOKENIZER_PREFIX_ID,
- LLM_KV_TOKENIZER_SUFFIX_ID,
- LLM_KV_TOKENIZER_MIDDLE_ID,
- LLM_KV_TOKENIZER_EOT_ID,
- LLM_KV_TOKENIZER_EOM_ID,
- LLM_KV_ADAPTER_TYPE,
- LLM_KV_ADAPTER_LORA_ALPHA,
- };
- static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
- { LLM_KV_GENERAL_TYPE, "general.type" },
- { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" },
- { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" },
- { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" },
- { LLM_KV_GENERAL_NAME, "general.name" },
- { LLM_KV_GENERAL_AUTHOR, "general.author" },
- { LLM_KV_GENERAL_VERSION, "general.version" },
- { LLM_KV_GENERAL_URL, "general.url" },
- { LLM_KV_GENERAL_DESCRIPTION, "general.description" },
- { LLM_KV_GENERAL_LICENSE, "general.license" },
- { LLM_KV_GENERAL_SOURCE_URL, "general.source.url" },
- { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source.huggingface.repository" },
- { LLM_KV_VOCAB_SIZE, "%s.vocab_size" },
- { LLM_KV_CONTEXT_LENGTH, "%s.context_length" },
- { LLM_KV_EMBEDDING_LENGTH, "%s.embedding_length" },
- { LLM_KV_BLOCK_COUNT, "%s.block_count" },
- { LLM_KV_LEADING_DENSE_BLOCK_COUNT, "%s.leading_dense_block_count" },
- { LLM_KV_FEED_FORWARD_LENGTH, "%s.feed_forward_length" },
- { LLM_KV_EXPERT_FEED_FORWARD_LENGTH, "%s.expert_feed_forward_length" },
- { LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, "%s.expert_shared_feed_forward_length" },
- { LLM_KV_USE_PARALLEL_RESIDUAL, "%s.use_parallel_residual" },
- { LLM_KV_TENSOR_DATA_LAYOUT, "%s.tensor_data_layout" },
- { LLM_KV_EXPERT_COUNT, "%s.expert_count" },
- { LLM_KV_EXPERT_USED_COUNT, "%s.expert_used_count" },
- { LLM_KV_EXPERT_SHARED_COUNT, "%s.expert_shared_count" },
- { LLM_KV_EXPERT_WEIGHTS_SCALE, "%s.expert_weights_scale" },
- { LLM_KV_POOLING_TYPE, "%s.pooling_type" },
- { LLM_KV_LOGIT_SCALE, "%s.logit_scale" },
- { LLM_KV_DECODER_START_TOKEN_ID, "%s.decoder_start_token_id" },
- { LLM_KV_ATTN_LOGIT_SOFTCAPPING, "%s.attn_logit_softcapping" },
- { LLM_KV_FINAL_LOGIT_SOFTCAPPING, "%s.final_logit_softcapping" },
- { LLM_KV_SWIN_NORM, "%s.swin_norm" },
- { LLM_KV_RESCALE_EVERY_N_LAYERS, "%s.rescale_every_n_layers" },
- { LLM_KV_TIME_MIX_EXTRA_DIM, "%s.time_mix_extra_dim" },
- { LLM_KV_TIME_DECAY_EXTRA_DIM, "%s.time_decay_extra_dim" },
- { LLM_KV_RESIDUAL_SCALE, "%s.residual_scale" },
- { LLM_KV_EMBEDDING_SCALE, "%s.embedding_scale" },
- { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" },
- { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" },
- { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" },
- { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" },
- { LLM_KV_ATTENTION_KEY_LENGTH, "%s.attention.key_length" },
- { LLM_KV_ATTENTION_VALUE_LENGTH, "%s.attention.value_length" },
- { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" },
- { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" },
- { LLM_KV_ATTENTION_CAUSAL, "%s.attention.causal" },
- { LLM_KV_ATTENTION_Q_LORA_RANK, "%s.attention.q_lora_rank" },
- { LLM_KV_ATTENTION_KV_LORA_RANK, "%s.attention.kv_lora_rank" },
- { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, "%s.attention.relative_buckets_count" },
- { LLM_KV_ATTENTION_SLIDING_WINDOW, "%s.attention.sliding_window" },
- { LLM_KV_ATTENTION_SCALE, "%s.attention.scale" },
- { LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION, "%s.attention.block_skip_connection.%d" },
- { LLM_KV_ATTENTION_CROSS_ATTENTION_LAYERS, "%s.attention.cross_attention_layers" },
- { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
- { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" },
- { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" },
- { LLM_KV_ROPE_SCALING_TYPE, "%s.rope.scaling.type" },
- { LLM_KV_ROPE_SCALING_FACTOR, "%s.rope.scaling.factor" },
- { LLM_KV_ROPE_SCALING_ATTN_FACTOR, "%s.rope.scaling.attn_factor" },
- { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, "%s.rope.scaling.original_context_length" },
- { LLM_KV_ROPE_SCALING_FINETUNED, "%s.rope.scaling.finetuned" },
- { LLM_KV_ROPE_SCALING_YARN_LOG_MUL, "%s.rope.scaling.yarn_log_multiplier" },
- { LLM_KV_SPLIT_NO, "split.no" },
- { LLM_KV_SPLIT_COUNT, "split.count" },
- { LLM_KV_SPLIT_TENSORS_COUNT, "split.tensors.count" },
- { LLM_KV_SSM_CONV_KERNEL, "%s.ssm.conv_kernel" },
- { LLM_KV_SSM_INNER_SIZE, "%s.ssm.inner_size" },
- { LLM_KV_SSM_STATE_SIZE, "%s.ssm.state_size" },
- { LLM_KV_SSM_TIME_STEP_RANK, "%s.ssm.time_step_rank" },
- { LLM_KV_SSM_DT_B_C_RMS, "%s.ssm.dt_b_c_rms" },
- { LLM_KV_WKV_HEAD_SIZE, "%s.wkv.head_size" },
- { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" },
- { LLM_KV_TOKENIZER_PRE, "tokenizer.ggml.pre" },
- { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" },
- { LLM_KV_TOKENIZER_TOKEN_TYPE, "tokenizer.ggml.token_type" },
- { LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, "tokenizer.ggml.token_type_count" },
- { LLM_KV_TOKENIZER_SCORES, "tokenizer.ggml.scores" },
- { LLM_KV_TOKENIZER_MERGES, "tokenizer.ggml.merges" },
- { LLM_KV_TOKENIZER_BOS_ID, "tokenizer.ggml.bos_token_id" },
- { LLM_KV_TOKENIZER_EOS_ID, "tokenizer.ggml.eos_token_id" },
- { LLM_KV_TOKENIZER_UNK_ID, "tokenizer.ggml.unknown_token_id" },
- { LLM_KV_TOKENIZER_SEP_ID, "tokenizer.ggml.seperator_token_id" },
- { LLM_KV_TOKENIZER_PAD_ID, "tokenizer.ggml.padding_token_id" },
- { LLM_KV_TOKENIZER_CLS_ID, "tokenizer.ggml.cls_token_id" },
- { LLM_KV_TOKENIZER_MASK_ID, "tokenizer.ggml.mask_token_id" },
- { LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" },
- { LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" },
- { LLM_KV_TOKENIZER_ADD_PREFIX, "tokenizer.ggml.add_space_prefix" },
- { LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, "tokenizer.ggml.remove_extra_whitespaces" },
- { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap" },
- { LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" },
- { LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" },
- { LLM_KV_TOKENIZER_PREFIX_ID, "tokenizer.ggml.prefix_token_id" },
- { LLM_KV_TOKENIZER_SUFFIX_ID, "tokenizer.ggml.suffix_token_id" },
- { LLM_KV_TOKENIZER_MIDDLE_ID, "tokenizer.ggml.middle_token_id" },
- { LLM_KV_TOKENIZER_EOT_ID, "tokenizer.ggml.eot_token_id" },
- { LLM_KV_TOKENIZER_EOM_ID, "tokenizer.ggml.eom_token_id" },
- { LLM_KV_ADAPTER_TYPE, "adapter.type" },
- { LLM_KV_ADAPTER_LORA_ALPHA, "adapter.lora.alpha" },
- };
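- // helper to build architecture-specific metadata keys from the templates above,
- // e.g. LLM_KV(arch)(LLM_KV_CONTEXT_LENGTH) -> "<arch name>.context_length"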
- struct LLM_KV {
- LLM_KV(llm_arch arch) : arch(arch) {}
- llm_arch arch;
- std::string operator()(llm_kv kv) const {
- return ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
- }
- };
- enum llm_tensor {
- LLM_TENSOR_TOKEN_EMBD,
- LLM_TENSOR_TOKEN_EMBD_NORM,
- LLM_TENSOR_TOKEN_TYPES,
- LLM_TENSOR_POS_EMBD,
- LLM_TENSOR_OUTPUT,
- LLM_TENSOR_OUTPUT_NORM,
- LLM_TENSOR_ROPE_FREQS,
- LLM_TENSOR_ROPE_FACTORS_LONG,
- LLM_TENSOR_ROPE_FACTORS_SHORT,
- LLM_TENSOR_ATTN_Q,
- LLM_TENSOR_ATTN_K,
- LLM_TENSOR_ATTN_V,
- LLM_TENSOR_ATTN_QKV,
- LLM_TENSOR_ATTN_OUT,
- LLM_TENSOR_ATTN_NORM,
- LLM_TENSOR_ATTN_NORM_2,
- LLM_TENSOR_ATTN_OUT_NORM,
- LLM_TENSOR_ATTN_POST_NORM,
- LLM_TENSOR_ATTN_ROT_EMBD,
- LLM_TENSOR_FFN_GATE_INP,
- LLM_TENSOR_FFN_GATE_INP_SHEXP,
- LLM_TENSOR_FFN_NORM,
- LLM_TENSOR_FFN_POST_NORM,
- LLM_TENSOR_FFN_GATE,
- LLM_TENSOR_FFN_DOWN,
- LLM_TENSOR_FFN_UP,
- LLM_TENSOR_FFN_ACT,
- LLM_TENSOR_FFN_DOWN_EXP, // split experts for backward compatibility
- LLM_TENSOR_FFN_GATE_EXP,
- LLM_TENSOR_FFN_UP_EXP,
- LLM_TENSOR_FFN_NORM_EXPS,
- LLM_TENSOR_FFN_DOWN_EXPS, // merged experts
- LLM_TENSOR_FFN_GATE_EXPS,
- LLM_TENSOR_FFN_UP_EXPS,
- LLM_TENSOR_FFN_DOWN_SHEXP,
- LLM_TENSOR_FFN_GATE_SHEXP,
- LLM_TENSOR_FFN_UP_SHEXP,
- LLM_TENSOR_ATTN_Q_NORM,
- LLM_TENSOR_ATTN_K_NORM,
- LLM_TENSOR_LAYER_OUT_NORM,
- LLM_TENSOR_SSM_IN,
- LLM_TENSOR_SSM_CONV1D,
- LLM_TENSOR_SSM_X,
- LLM_TENSOR_SSM_DT,
- LLM_TENSOR_SSM_A,
- LLM_TENSOR_SSM_D,
- LLM_TENSOR_SSM_OUT,
- LLM_TENSOR_TIME_MIX_W1,
- LLM_TENSOR_TIME_MIX_W2,
- LLM_TENSOR_TIME_MIX_LERP_X,
- LLM_TENSOR_TIME_MIX_LERP_W,
- LLM_TENSOR_TIME_MIX_LERP_K,
- LLM_TENSOR_TIME_MIX_LERP_V,
- LLM_TENSOR_TIME_MIX_LERP_R,
- LLM_TENSOR_TIME_MIX_LERP_G,
- LLM_TENSOR_TIME_MIX_FIRST,
- LLM_TENSOR_TIME_MIX_DECAY,
- LLM_TENSOR_TIME_MIX_DECAY_W1,
- LLM_TENSOR_TIME_MIX_DECAY_W2,
- LLM_TENSOR_TIME_MIX_KEY,
- LLM_TENSOR_TIME_MIX_VALUE,
- LLM_TENSOR_TIME_MIX_RECEPTANCE,
- LLM_TENSOR_TIME_MIX_GATE,
- LLM_TENSOR_TIME_MIX_LN,
- LLM_TENSOR_TIME_MIX_OUTPUT,
- LLM_TENSOR_CHANNEL_MIX_LERP_K,
- LLM_TENSOR_CHANNEL_MIX_LERP_R,
- LLM_TENSOR_CHANNEL_MIX_KEY,
- LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,
- LLM_TENSOR_CHANNEL_MIX_VALUE,
- LLM_TENSOR_ATTN_Q_A,
- LLM_TENSOR_ATTN_Q_B,
- LLM_TENSOR_ATTN_KV_A_MQA,
- LLM_TENSOR_ATTN_KV_B,
- LLM_TENSOR_ATTN_Q_A_NORM,
- LLM_TENSOR_ATTN_KV_A_NORM,
- LLM_TENSOR_ATTN_SUB_NORM,
- LLM_TENSOR_FFN_SUB_NORM,
- LLM_TENSOR_DEC_ATTN_NORM,
- LLM_TENSOR_DEC_ATTN_Q,
- LLM_TENSOR_DEC_ATTN_K,
- LLM_TENSOR_DEC_ATTN_V,
- LLM_TENSOR_DEC_ATTN_OUT,
- LLM_TENSOR_DEC_ATTN_REL_B,
- LLM_TENSOR_DEC_CROSS_ATTN_NORM,
- LLM_TENSOR_DEC_CROSS_ATTN_Q,
- LLM_TENSOR_DEC_CROSS_ATTN_K,
- LLM_TENSOR_DEC_CROSS_ATTN_V,
- LLM_TENSOR_DEC_CROSS_ATTN_OUT,
- LLM_TENSOR_DEC_CROSS_ATTN_REL_B,
- LLM_TENSOR_DEC_FFN_NORM,
- LLM_TENSOR_DEC_FFN_GATE,
- LLM_TENSOR_DEC_FFN_DOWN,
- LLM_TENSOR_DEC_FFN_UP,
- LLM_TENSOR_DEC_OUTPUT_NORM,
- LLM_TENSOR_ENC_ATTN_NORM,
- LLM_TENSOR_ENC_ATTN_Q,
- LLM_TENSOR_ENC_ATTN_K,
- LLM_TENSOR_ENC_ATTN_V,
- LLM_TENSOR_ENC_ATTN_OUT,
- LLM_TENSOR_ENC_ATTN_REL_B,
- LLM_TENSOR_ENC_FFN_NORM,
- LLM_TENSOR_ENC_FFN_GATE,
- LLM_TENSOR_ENC_FFN_DOWN,
- LLM_TENSOR_ENC_FFN_UP,
- LLM_TENSOR_ENC_OUTPUT_NORM,
- LLM_TENSOR_CLS,
- LLM_TENSOR_CLS_OUT,
- LLM_TENSOR_BSKCN_TV,
- LLM_TENSOR_CROSS_ATTN_K_NORM,
- LLM_TENSOR_CROSS_ATTN_K_PROJ,
- LLM_TENSOR_CROSS_ATTN_O_PROJ,
- LLM_TENSOR_CROSS_ATTN_Q_NORM,
- LLM_TENSOR_CROSS_ATTN_Q_PROJ,
- LLM_TENSOR_CROSS_ATTN_V_PROJ,
- LLM_TENSOR_CROSS_ATTN_ATTN_GATE,
- LLM_TENSOR_CROSS_ATTN_MLP_GATE,
- };
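- // per-architecture tensor name templates; "%d" placeholders are filled in with the
- // block (and expert) index by LLM_TN below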
- static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
- {
- LLM_ARCH_LLAMA,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
- { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
- { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
- },
- },
- {
- LLM_ARCH_MLLAMA,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
- { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
- { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
- { LLM_TENSOR_CROSS_ATTN_K_NORM, "blk.%d.cross_attn_k_norm" },
- { LLM_TENSOR_CROSS_ATTN_K_PROJ, "blk.%d.cross_attn_k_proj" },
- { LLM_TENSOR_CROSS_ATTN_O_PROJ, "blk.%d.cross_attn_o_proj" },
- { LLM_TENSOR_CROSS_ATTN_Q_NORM, "blk.%d.cross_attn_q_norm" },
- { LLM_TENSOR_CROSS_ATTN_Q_PROJ, "blk.%d.cross_attn_q_proj" },
- { LLM_TENSOR_CROSS_ATTN_V_PROJ, "blk.%d.cross_attn_v_proj" },
- { LLM_TENSOR_CROSS_ATTN_ATTN_GATE, "blk.%d.cross_attn_attn_gate" },
- { LLM_TENSOR_CROSS_ATTN_MLP_GATE, "blk.%d.cross_attn_mlp_gate" },
- },
- },
- {
- LLM_ARCH_BAICHUAN,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_FALCON,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_GROK,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
- { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
- { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
- { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
- { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
- },
- },
- {
- LLM_ARCH_GPT2,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_POS_EMBD, "position_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- },
- },
- {
- LLM_ARCH_GPTJ,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- },
- },
- {
- LLM_ARCH_GPTNEOX,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_MPT,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_FFN_ACT, "blk.%d.ffn.act" },
- { LLM_TENSOR_POS_EMBD, "position_embd" },
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
- },
- },
- {
- LLM_ARCH_STARCODER,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_POS_EMBD, "position_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- },
- },
- {
- LLM_ARCH_REFACT,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_BERT,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
- { LLM_TENSOR_TOKEN_TYPES, "token_types" },
- { LLM_TENSOR_POS_EMBD, "position_embd" },
- { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_CLS, "cls" },
- { LLM_TENSOR_CLS_OUT, "cls.output" },
- },
- },
- {
- LLM_ARCH_NOMIC_BERT,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
- { LLM_TENSOR_TOKEN_TYPES, "token_types" },
- { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_JINA_BERT_V2,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
- { LLM_TENSOR_TOKEN_TYPES, "token_types" },
- { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
- { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_CLS, "cls" },
- },
- },
- {
- LLM_ARCH_BLOOM,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- },
- },
- {
- LLM_ARCH_STABLELM,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
- },
- },
- {
- LLM_ARCH_QWEN,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_QWEN2,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_QWEN2MOE,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
- { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
- },
- },
- {
- LLM_ARCH_PHI2,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_PHI3,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" },
- { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_PLAMO,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_CODESHELL,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_ORION,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_INTERNLM2,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_MINICPM,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
- { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
- { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
- },
- },
- {
- LLM_ARCH_MINICPM3,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" },
- { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q_A_NORM, "blk.%d.attn_q_a_norm" },
- { LLM_TENSOR_ATTN_KV_A_NORM, "blk.%d.attn_kv_a_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_Q_A, "blk.%d.attn_q_a" },
- { LLM_TENSOR_ATTN_Q_B, "blk.%d.attn_q_b" },
- { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" },
- { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- },
- },
- {
- LLM_ARCH_GEMMA,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_GEMMA2,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
- },
- },
- {
- LLM_ARCH_STARCODER2,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_MAMBA,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" },
- { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" },
- { LLM_TENSOR_SSM_X, "blk.%d.ssm_x" },
- { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" },
- { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" },
- { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" },
- { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" },
- },
- },
- {
- LLM_ARCH_XVERSE,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_COMMAND_R,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
- },
- },
- {
- LLM_ARCH_DBRX,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
- },
- },
- {
- LLM_ARCH_OLMO,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_OLMOE,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
- },
- },
- {
- LLM_ARCH_OPENELM,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_ARCTIC,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_FFN_NORM_EXPS, "blk.%d.ffn_norm_exps" },
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
- },
- },
- {
- LLM_ARCH_DEEPSEEK2,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q_A_NORM, "blk.%d.attn_q_a_norm" },
- { LLM_TENSOR_ATTN_KV_A_NORM, "blk.%d.attn_kv_a_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_Q_A, "blk.%d.attn_q_a" },
- { LLM_TENSOR_ATTN_Q_B, "blk.%d.attn_q_b" },
- { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" },
- { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
- { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
- },
- },
- {
- LLM_ARCH_CHATGLM,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- },
- },
- {
- LLM_ARCH_BITNET,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_SUB_NORM, "blk.%d.attn_sub_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_SUB_NORM, "blk.%d.ffn_sub_norm" },
- },
- },
- {
- LLM_ARCH_T5,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_DEC_OUTPUT_NORM, "dec.output_norm" },
- { LLM_TENSOR_DEC_ATTN_NORM, "dec.blk.%d.attn_norm" },
- { LLM_TENSOR_DEC_ATTN_Q, "dec.blk.%d.attn_q" },
- { LLM_TENSOR_DEC_ATTN_K, "dec.blk.%d.attn_k" },
- { LLM_TENSOR_DEC_ATTN_V, "dec.blk.%d.attn_v" },
- { LLM_TENSOR_DEC_ATTN_OUT, "dec.blk.%d.attn_o" },
- { LLM_TENSOR_DEC_ATTN_REL_B, "dec.blk.%d.attn_rel_b" },
- { LLM_TENSOR_DEC_CROSS_ATTN_NORM, "dec.blk.%d.cross_attn_norm" },
- { LLM_TENSOR_DEC_CROSS_ATTN_Q, "dec.blk.%d.cross_attn_q" },
- { LLM_TENSOR_DEC_CROSS_ATTN_K, "dec.blk.%d.cross_attn_k" },
- { LLM_TENSOR_DEC_CROSS_ATTN_V, "dec.blk.%d.cross_attn_v" },
- { LLM_TENSOR_DEC_CROSS_ATTN_OUT, "dec.blk.%d.cross_attn_o" },
- { LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "dec.blk.%d.cross_attn_rel_b" },
- { LLM_TENSOR_DEC_FFN_NORM, "dec.blk.%d.ffn_norm" },
- { LLM_TENSOR_DEC_FFN_GATE, "dec.blk.%d.ffn_gate" },
- { LLM_TENSOR_DEC_FFN_DOWN, "dec.blk.%d.ffn_down" },
- { LLM_TENSOR_DEC_FFN_UP, "dec.blk.%d.ffn_up" },
- { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" },
- { LLM_TENSOR_ENC_ATTN_NORM, "enc.blk.%d.attn_norm" },
- { LLM_TENSOR_ENC_ATTN_Q, "enc.blk.%d.attn_q" },
- { LLM_TENSOR_ENC_ATTN_K, "enc.blk.%d.attn_k" },
- { LLM_TENSOR_ENC_ATTN_V, "enc.blk.%d.attn_v" },
- { LLM_TENSOR_ENC_ATTN_OUT, "enc.blk.%d.attn_o" },
- { LLM_TENSOR_ENC_ATTN_REL_B, "enc.blk.%d.attn_rel_b" },
- { LLM_TENSOR_ENC_FFN_NORM, "enc.blk.%d.ffn_norm" },
- { LLM_TENSOR_ENC_FFN_GATE, "enc.blk.%d.ffn_gate" },
- { LLM_TENSOR_ENC_FFN_DOWN, "enc.blk.%d.ffn_down" },
- { LLM_TENSOR_ENC_FFN_UP, "enc.blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_T5ENCODER,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" },
- { LLM_TENSOR_ENC_ATTN_NORM, "enc.blk.%d.attn_norm" },
- { LLM_TENSOR_ENC_ATTN_Q, "enc.blk.%d.attn_q" },
- { LLM_TENSOR_ENC_ATTN_K, "enc.blk.%d.attn_k" },
- { LLM_TENSOR_ENC_ATTN_V, "enc.blk.%d.attn_v" },
- { LLM_TENSOR_ENC_ATTN_OUT, "enc.blk.%d.attn_o" },
- { LLM_TENSOR_ENC_ATTN_REL_B, "enc.blk.%d.attn_rel_b" },
- { LLM_TENSOR_ENC_FFN_NORM, "enc.blk.%d.ffn_norm" },
- { LLM_TENSOR_ENC_FFN_GATE, "enc.blk.%d.ffn_gate" },
- { LLM_TENSOR_ENC_FFN_DOWN, "enc.blk.%d.ffn_down" },
- { LLM_TENSOR_ENC_FFN_UP, "enc.blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_JAIS,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- },
- },
- {
- LLM_ARCH_NEMOTRON,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_EXAONE,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_RWKV6,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
- { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" },
- { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" },
- { LLM_TENSOR_TIME_MIX_LERP_X, "blk.%d.time_mix_lerp_x" },
- { LLM_TENSOR_TIME_MIX_LERP_W, "blk.%d.time_mix_lerp_w" },
- { LLM_TENSOR_TIME_MIX_LERP_K, "blk.%d.time_mix_lerp_k" },
- { LLM_TENSOR_TIME_MIX_LERP_V, "blk.%d.time_mix_lerp_v" },
- { LLM_TENSOR_TIME_MIX_LERP_R, "blk.%d.time_mix_lerp_r" },
- { LLM_TENSOR_TIME_MIX_LERP_G, "blk.%d.time_mix_lerp_g" },
- { LLM_TENSOR_TIME_MIX_FIRST, "blk.%d.time_mix_first" },
- { LLM_TENSOR_TIME_MIX_DECAY, "blk.%d.time_mix_decay" },
- { LLM_TENSOR_TIME_MIX_DECAY_W1, "blk.%d.time_mix_decay_w1" },
- { LLM_TENSOR_TIME_MIX_DECAY_W2, "blk.%d.time_mix_decay_w2" },
- { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" },
- { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" },
- { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" },
- { LLM_TENSOR_TIME_MIX_GATE, "blk.%d.time_mix_gate" },
- { LLM_TENSOR_TIME_MIX_LN, "blk.%d.time_mix_ln" },
- { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" },
- { LLM_TENSOR_CHANNEL_MIX_LERP_K, "blk.%d.channel_mix_lerp_k" },
- { LLM_TENSOR_CHANNEL_MIX_LERP_R, "blk.%d.channel_mix_lerp_r" },
- { LLM_TENSOR_CHANNEL_MIX_KEY, "blk.%d.channel_mix_key" },
- { LLM_TENSOR_CHANNEL_MIX_VALUE, "blk.%d.channel_mix_value" },
- { LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "blk.%d.channel_mix_receptance" },
- },
- },
- {
- LLM_ARCH_GRANITE,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- },
- },
- {
- LLM_ARCH_GRANITE_MOE,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
- },
- },
- {
- LLM_ARCH_CHAMELEON,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
- },
- },
- {
- LLM_ARCH_SOLAR,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
- { LLM_TENSOR_OUTPUT, "output" },
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
- { LLM_TENSOR_BSKCN_TV, "bskcn_tv" },
- },
- },
- {
- LLM_ARCH_UNKNOWN,
- {
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
- },
- },
- };
- static llm_arch llm_arch_from_string(const std::string & name) {
- for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
- if (kv.second == name) {
- return kv.first;
- }
- }
- return LLM_ARCH_UNKNOWN;
- }
- // helper to handle gguf constants
- // usage:
- //
- // const auto tn = LLM_TN(LLM_ARCH_LLAMA);
- //
- // std::string name = tn(LLM_TENSOR_OUTPUT); -> "output"
- // std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias"); -> "token_embd.bias"
- // std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3); -> "blk.3.attn_norm.weight"
- //
- struct LLM_TN {
- LLM_TN(llm_arch arch) : arch(arch) {}
- llm_arch arch;
- std::string operator()(llm_tensor tensor) const {
- if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
- return "__missing__";
- }
- return LLM_TENSOR_NAMES.at(arch).at(tensor);
- }
- std::string operator()(llm_tensor tensor, const std::string & suffix) const {
- if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
- return "__missing__";
- }
- return LLM_TENSOR_NAMES.at(arch).at(tensor) + "." + suffix;
- }
- std::string operator()(llm_tensor tensor, int bid) const {
- if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
- return "__missing__";
- }
- return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid);
- }
- std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
- if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
- return "__missing__";
- }
- return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid) + "." + suffix;
- }
- std::string operator()(llm_tensor tensor, const std::string & suffix, int bid, int xid) const {
- if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
- return "__missing__";
- }
- return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid, xid) + "." + suffix;
- }
- };
- //
- // gguf helpers
- //
- static const std::map<llama_rope_scaling_type, const char *> LLAMA_ROPE_SCALING_TYPES = {
- { LLAMA_ROPE_SCALING_TYPE_NONE, "none" },
- { LLAMA_ROPE_SCALING_TYPE_LINEAR, "linear" },
- { LLAMA_ROPE_SCALING_TYPE_YARN, "yarn" },
- };
- static llama_rope_scaling_type llama_rope_scaling_type_from_string(const std::string & name) {
- for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
- if (kv.second == name) {
- return (llama_rope_scaling_type) kv.first;
- }
- }
- return LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
- }
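- // stringify the i-th element of a raw GGUF value of the given type (helper for gguf_kv_to_str below)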
- static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
- switch (type) {
- case GGUF_TYPE_UINT8: return std::to_string(((const uint8_t *)data)[i]);
- case GGUF_TYPE_INT8: return std::to_string(((const int8_t *)data)[i]);
- case GGUF_TYPE_UINT16: return std::to_string(((const uint16_t *)data)[i]);
- case GGUF_TYPE_INT16: return std::to_string(((const int16_t *)data)[i]);
- case GGUF_TYPE_UINT32: return std::to_string(((const uint32_t *)data)[i]);
- case GGUF_TYPE_INT32: return std::to_string(((const int32_t *)data)[i]);
- case GGUF_TYPE_UINT64: return std::to_string(((const uint64_t *)data)[i]);
- case GGUF_TYPE_INT64: return std::to_string(((const int64_t *)data)[i]);
- case GGUF_TYPE_FLOAT32: return std::to_string(((const float *)data)[i]);
- case GGUF_TYPE_FLOAT64: return std::to_string(((const double *)data)[i]);
- case GGUF_TYPE_BOOL: return ((const bool *)data)[i] ? "true" : "false";
- default: return format("unknown type %d", type);
- }
- }
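- // stringify the i-th key/value pair of a GGUF context; arrays are rendered as "[a, b, ...]"
- // with strings quoted and escaped, and nested arrays shown as "???"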
- static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
- const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
- switch (type) {
- case GGUF_TYPE_STRING:
- return gguf_get_val_str(ctx_gguf, i);
- case GGUF_TYPE_ARRAY:
- {
- const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
- int arr_n = gguf_get_arr_n(ctx_gguf, i);
- const void * data = gguf_get_arr_data(ctx_gguf, i);
- std::stringstream ss;
- ss << "[";
- for (int j = 0; j < arr_n; j++) {
- if (arr_type == GGUF_TYPE_STRING) {
- std::string val = gguf_get_arr_str(ctx_gguf, i, j);
- // escape quotes
- replace_all(val, "\\", "\\\\");
- replace_all(val, "\"", "\\\"");
- ss << '"' << val << '"';
- } else if (arr_type == GGUF_TYPE_ARRAY) {
- ss << "???";
- } else {
- ss << gguf_data_to_str(arr_type, data, j);
- }
- if (j < arr_n - 1) {
- ss << ", ";
- }
- }
- ss << "]";
- return ss.str();
- }
- default:
- return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);
- }
- }
- //
- // llama helpers
- //
- #if defined(_WIN32)
- static std::string llama_format_win_err(DWORD err) {
- LPSTR buf;
- size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
- if (!size) {
- return "FormatMessageA failed";
- }
- std::string ret(buf, size);
- LocalFree(buf);
- return ret;
- }
- #endif
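- // wrapper that leaves its value uninitialized, so containers of no_init<T> can grow
- // without value-initializing every element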
- template <typename T>
- struct no_init {
- T value;
- no_init() { /* do nothing */ }
- };
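- // basic file abstraction: the Win32 build goes through ReadFile/WriteFile, other builds use stdio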
- struct llama_file {
- #if defined(_WIN32)
- // use FILE * so we don't have to re-open the file to mmap
- FILE * fp;
- HANDLE fp_win32;
- size_t size;
- private:
- std::string GetErrorMessageWin32(DWORD error_code) const {
- std::string ret;
- LPSTR lpMsgBuf = NULL;
- DWORD bufLen = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&lpMsgBuf, 0, NULL);
- if (!bufLen) {
- ret = format("Win32 error code: %s", error_code);
- } else {
- ret = lpMsgBuf;
- LocalFree(lpMsgBuf);
- }
- return ret;
- }
- public:
- llama_file(const char * fname, const char * mode) {
- fp = ggml_fopen(fname, mode);
- if (fp == NULL) {
- throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
- }
- fp_win32 = (HANDLE) _get_osfhandle(_fileno(fp));
- seek(0, SEEK_END);
- size = tell();
- seek(0, SEEK_SET);
- }
- size_t tell() const {
- // SetFilePointerEx returns the current position when seeking relative 0 bytes
- LARGE_INTEGER li;
- li.QuadPart = 0;
- BOOL ret = SetFilePointerEx(fp_win32, li, &li, FILE_CURRENT);
- if (!ret) {
- throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
- }
- return li.QuadPart;
- }
- void seek(size_t offset, int whence) const {
- // no need to convert SEEK_* to FILE_*. The enums are the same.
- // Still, keep static asserts to avoid failures in the future.
- static_assert(SEEK_SET == FILE_BEGIN, "SEEK_SET != FILE_BEGIN");
- static_assert(SEEK_CUR == FILE_CURRENT, "SEEK_CUR != FILE_CURRENT");
- static_assert(SEEK_END == FILE_END, "SEEK_END != FILE_END");
- LARGE_INTEGER li;
- li.QuadPart = offset;
- BOOL ret = SetFilePointerEx(fp_win32, li, NULL, whence);
- if (!ret) {
- throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
- }
- }
- void read_raw(void * ptr, size_t len) const {
- // On Win32, ReadFile is significantly faster than fread, which is in turn significantly faster than std::fstream.
- // Thus use the Win32 API for file I/O instead of the C/C++ library functions.
- // There are conditions under which ReadFile cannot read chunks >64MB.
- // Thus split the operation into smaller chunks if len exceeds this limit.
- size_t bytes_read = 0;
- while (bytes_read < len) {
- size_t chunk_size = std::min<size_t>(len - bytes_read, 64*1024*1024);
- DWORD chunk_read = 0;
- BOOL result = ReadFile(fp_win32, reinterpret_cast<char*>(ptr) + bytes_read, chunk_size, &chunk_read, NULL);
- if (!result) {
- throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
- }
- if (chunk_read < chunk_size || chunk_read == 0) {
- throw std::runtime_error("unexpectedly reached end of file");
- }
- bytes_read += chunk_read;
- }
- }
- uint32_t read_u32() const {
- uint32_t val;
- read_raw(&val, sizeof(val));
- return val;
- }
- void write_raw(const void * ptr, size_t len) const {
- // There are conditions under which WriteFile cannot write chunks >64MB.
- // Thus split the operation into smaller chunks if len exceeds this limit.
- size_t bytes_written = 0;
- while (bytes_written < len) {
- size_t chunk_size = std::min<size_t>(len - bytes_written, 64*1024*1024);
- DWORD chunk_written = 0;
- BOOL result = WriteFile(fp_win32, reinterpret_cast<char const*>(ptr) + bytes_written, chunk_size, &chunk_written, NULL);
- if (!result) {
- throw std::runtime_error(format("write error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
- }
- if (chunk_written < chunk_size || chunk_written == 0) {
- throw std::runtime_error("unexpectedly failed to write bytes");
- }
- bytes_written += chunk_written;
- }
- }
- void write_u32(std::uint32_t val) const {
- write_raw(&val, sizeof(val));
- }
- ~llama_file() {
- if (fp) {
- std::fclose(fp);
- }
- }
- #else
- // use FILE * so we don't have to re-open the file to mmap
- FILE * fp;
- size_t size;
- llama_file(const char * fname, const char * mode) {
- fp = ggml_fopen(fname, mode);
- if (fp == NULL) {
- throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
- }
- seek(0, SEEK_END);
- size = tell();
- seek(0, SEEK_SET);
- }
- size_t tell() const {
- #ifdef _WIN32
- __int64 ret = _ftelli64(fp);
- #else
- long ret = std::ftell(fp);
- #endif
- if (ret == -1) {
- throw std::runtime_error(format("ftell error: %s", strerror(errno)));
- }
- return (size_t) ret;
- }
- void seek(size_t offset, int whence) const {
- #ifdef _WIN32
- int ret = _fseeki64(fp, (__int64) offset, whence);
- #else
- int ret = std::fseek(fp, (long) offset, whence);
- #endif
- if (ret != 0) {
- throw std::runtime_error(format("seek error: %s", strerror(errno)));
- }
- }
- void read_raw(void * ptr, size_t len) const {
- if (len == 0) {
- return;
- }
- errno = 0;
- std::size_t ret = std::fread(ptr, len, 1, fp);
- if (ferror(fp)) {
- throw std::runtime_error(format("read error: %s", strerror(errno)));
- }
- if (ret != 1) {
- throw std::runtime_error("unexpectedly reached end of file");
- }
- }
- uint32_t read_u32() const {
- uint32_t ret;
- read_raw(&ret, sizeof(ret));
- return ret;
- }
- void write_raw(const void * ptr, size_t len) const {
- if (len == 0) {
- return;
- }
- errno = 0;
- size_t ret = std::fwrite(ptr, len, 1, fp);
- if (ret != 1) {
- throw std::runtime_error(format("write error: %s", strerror(errno)));
- }
- }
- void write_u32(std::uint32_t val) const {
- write_raw(&val, sizeof(val));
- }
- ~llama_file() {
- if (fp) {
- std::fclose(fp);
- }
- }
- #endif
- };
- using llama_files = std::vector<std::unique_ptr<llama_file>>;
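- // read-only memory mapping of a llama_file; on POSIX, parts of the mapping can be released
- // early with unmap_fragment() (a no-op on Windows)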
- struct llama_mmap {
- void * addr;
- size_t size;
- llama_mmap(const llama_mmap &) = delete;
- #ifdef _POSIX_MAPPED_FILES
- static constexpr bool SUPPORTED = true;
- // list of mapped fragments (first_offset, last_offset)
- std::vector<std::pair<size_t, size_t>> mapped_fragments;
- llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
- size = file->size;
- int fd = fileno(file->fp);
- int flags = MAP_SHARED;
- // prefetch/readahead impairs performance on NUMA systems
- if (numa) { prefetch = 0; }
- #ifdef __linux__
- // advise the kernel to read the file sequentially (increases readahead)
- if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) {
- LLAMA_LOG_WARN("warning: posix_fadvise(.., POSIX_FADV_SEQUENTIAL) failed: %s\n",
- strerror(errno));
- }
- if (prefetch) { flags |= MAP_POPULATE; }
- #endif
- addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
- if (addr == MAP_FAILED) { // NOLINT
- throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
- }
- if (prefetch > 0) {
- // advise the kernel to preload the mapped memory
- if (posix_madvise(addr, std::min(file->size, prefetch), POSIX_MADV_WILLNEED)) {
- LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n",
- strerror(errno));
- }
- }
- if (numa) {
- // advise the kernel not to use readahead
- // (because the next page might not belong on the same node)
- if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) {
- LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n",
- strerror(errno));
- }
- }
- // initialize list of mapped_fragments
- mapped_fragments.emplace_back(0, file->size);
- }
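- // shrink [*first, *last) inward to page boundaries so that only whole pages are ever unmapped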
- static void align_range(size_t * first, size_t * last, size_t page_size) {
- // align first to the next page
- size_t offset_in_page = *first & (page_size - 1);
- size_t offset_to_page = offset_in_page == 0 ? 0 : page_size - offset_in_page;
- *first += offset_to_page;
- // align last to the previous page
- *last = *last & ~(page_size - 1);
- if (*last <= *first) {
- *last = *first;
- }
- }
- // partially unmap the file in the range [first, last)
- void unmap_fragment(size_t first, size_t last) {
- // note: this function must not be called multiple times with overlapping ranges
- // otherwise, there is a risk of invalidating addresses that have been repurposed for other mappings
- int page_size = sysconf(_SC_PAGESIZE);
- align_range(&first, &last, page_size);
- size_t len = last - first;
- if (len == 0) {
- return;
- }
- GGML_ASSERT(first % page_size == 0);
- GGML_ASSERT(last % page_size == 0);
- GGML_ASSERT(last > first);
- void * next_page_start = (uint8_t *) addr + first;
- // unmap the range
- if (munmap(next_page_start, len)) {
- LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
- }
- // update the list of mapped fragments to avoid unmapping the same range again in the destructor
- std::vector<std::pair<size_t, size_t>> new_mapped_fragments;
- for (const auto & frag : mapped_fragments) {
- if (frag.first < first && frag.second > last) {
- // the range is in the middle of the fragment, split it
- new_mapped_fragments.emplace_back(frag.first, first);
- new_mapped_fragments.emplace_back(last, frag.second);
- } else if (frag.first < first && frag.second > first) {
- // the range starts in the middle of the fragment
- new_mapped_fragments.emplace_back(frag.first, first);
- } else if (frag.first < last && frag.second > last) {
- // the range ends in the middle of the fragment
- new_mapped_fragments.emplace_back(last, frag.second);
- } else if (frag.first >= first && frag.second <= last) {
- // the range covers the entire fragment
- } else {
- // the range is outside the fragment
- new_mapped_fragments.push_back(frag);
- }
- }
- mapped_fragments = std::move(new_mapped_fragments);
- }
- ~llama_mmap() {
- for (const auto & frag : mapped_fragments) {
- if (munmap((char *) addr + frag.first, frag.second - frag.first)) {
- LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
- }
- }
- }
- #elif defined(_WIN32)
- static constexpr bool SUPPORTED = true;
- llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false) {
- GGML_UNUSED(numa);
- size = file->size;
- HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
- HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
- if (hMapping == NULL) {
- DWORD error = GetLastError();
- throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
- }
- addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
- DWORD error = GetLastError();
- CloseHandle(hMapping);
- if (addr == NULL) {
- throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
- }
- if (prefetch > 0) {
- #if _WIN32_WINNT >= 0x602
- // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it
- BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
- HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");
- // may fail on pre-Windows 8 systems
- pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)> (GetProcAddress(hKernel32, "PrefetchVirtualMemory"));
- if (pPrefetchVirtualMemory) {
- // advise the kernel to preload the mapped memory
- WIN32_MEMORY_RANGE_ENTRY range;
- range.VirtualAddress = addr;
- range.NumberOfBytes = (SIZE_T) std::min(size, prefetch);
- if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
- LLAMA_LOG_WARN("warning: PrefetchVirtualMemory failed: %s\n",
- llama_format_win_err(GetLastError()).c_str());
- }
- }
- #else
- throw std::runtime_error("PrefetchVirtualMemory unavailable");
- #endif
- }
- }
- void unmap_fragment(size_t first, size_t last) {
- // not supported
- GGML_UNUSED(first);
- GGML_UNUSED(last);
- }
- ~llama_mmap() {
- if (!UnmapViewOfFile(addr)) {
- LLAMA_LOG_WARN("warning: UnmapViewOfFile failed: %s\n",
- llama_format_win_err(GetLastError()).c_str());
- }
- }
- #else
- static constexpr bool SUPPORTED = false;
- llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false) {
- GGML_UNUSED(file);
- GGML_UNUSED(prefetch);
- GGML_UNUSED(numa);
- throw std::runtime_error("mmap not supported");
- }
- void unmap_fragment(size_t first, size_t last) {
- GGML_UNUSED(first);
- GGML_UNUSED(last);
- throw std::runtime_error("mmap not supported");
- }
- #endif
- };
- using llama_mmaps = std::vector<std::unique_ptr<llama_mmap>>;
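For reference, the page-alignment step that precedes every partial munmap can be exercised on its own. The snippet below is a standalone copy of align_range with a tiny driver; the page size is hard-coded to 4096 purely for illustration, and it must be a power of two (as sysconf(_SC_PAGESIZE) guarantees).

#include <cstddef>
#include <cstdio>

// round `first` up and `last` down to page boundaries, so munmap() only ever
// sees whole pages; the range collapses to empty if no whole page remains
static void align_range(size_t * first, size_t * last, size_t page_size) {
    size_t offset_in_page = *first & (page_size - 1);   // page_size must be a power of two
    size_t offset_to_page = offset_in_page == 0 ? 0 : page_size - offset_in_page;
    *first += offset_to_page;
    *last = *last & ~(page_size - 1);
    if (*last <= *first) {
        *last = *first;
    }
}

int main() {
    size_t first = 100, last = 9000;
    align_range(&first, &last, 4096);
    std::printf("[%zu, %zu)\n", first, last);   // prints [4096, 8192)
    return 0;
}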
- // Represents some region of memory being locked using mlock or VirtualLock;
- // will automatically unlock on destruction.
- struct llama_mlock {
- void * addr = NULL;
- size_t size = 0;
- bool failed_already = false;
- llama_mlock() {}
- llama_mlock(const llama_mlock &) = delete;
- ~llama_mlock() {
- if (size) {
- raw_unlock(addr, size);
- }
- }
- void init(void * ptr) {
- GGML_ASSERT(addr == NULL && size == 0); // NOLINT
- addr = ptr;
- }
- void grow_to(size_t target_size) {
- GGML_ASSERT(addr);
- if (failed_already) {
- return;
- }
- size_t granularity = lock_granularity();
- target_size = (target_size + granularity - 1) & ~(granularity - 1);
- if (target_size > size) {
- if (raw_lock((uint8_t *) addr + size, target_size - size)) {
- size = target_size;
- } else {
- failed_already = true;
- }
- }
- }
- #ifdef _POSIX_MEMLOCK_RANGE
- static constexpr bool SUPPORTED = true;
- static size_t lock_granularity() {
- return (size_t) sysconf(_SC_PAGESIZE);
- }
- #ifdef __APPLE__
- #define MLOCK_SUGGESTION \
- "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
- "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MEMLOCK (ulimit -l).\n"
- #else
- #define MLOCK_SUGGESTION \
- "Try increasing RLIMIT_MEMLOCK ('ulimit -l' as root).\n"
- #endif
- bool raw_lock(const void * addr, size_t size) const {
- if (!mlock(addr, size)) {
- return true;
- }
- char * errmsg = std::strerror(errno);
- bool suggest = (errno == ENOMEM);
- // Check if the resource limit is fine after all
- struct rlimit lock_limit;
- if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
- suggest = false;
- }
- if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) {
- suggest = false;
- }
- LLAMA_LOG_WARN("warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
- size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
- return false;
- }
- #undef MLOCK_SUGGESTION
- static void raw_unlock(void * addr, size_t size) {
- if (munlock(addr, size)) {
- LLAMA_LOG_WARN("warning: failed to munlock buffer: %s\n", std::strerror(errno));
- }
- }
- #elif defined(_WIN32)
- static constexpr bool SUPPORTED = true;
- static size_t lock_granularity() {
- SYSTEM_INFO si;
- GetSystemInfo(&si);
- return (size_t) si.dwPageSize;
- }
- bool raw_lock(void * ptr, size_t len) const {
- for (int tries = 1; ; tries++) {
- if (VirtualLock(ptr, len)) {
- return true;
- }
- if (tries == 2) {
- LLAMA_LOG_WARN("warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
- len, size, llama_format_win_err(GetLastError()).c_str());
- return false;
- }
- // It failed but this was only the first try; increase the working
- // set size and try again.
- SIZE_T min_ws_size, max_ws_size;
- if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
- LLAMA_LOG_WARN("warning: GetProcessWorkingSetSize failed: %s\n",
- llama_format_win_err(GetLastError()).c_str());
- return false;
- }
- // Per MSDN: "The maximum number of pages that a process can lock
- // is equal to the number of pages in its minimum working set minus
- // a small overhead."
- // Hopefully a megabyte is enough overhead:
- size_t increment = len + 1048576;
- // The minimum must be <= the maximum, so we need to increase both:
- min_ws_size += increment;
- max_ws_size += increment;
- if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
- LLAMA_LOG_WARN("warning: SetProcessWorkingSetSize failed: %s\n",
- llama_format_win_err(GetLastError()).c_str());
- return false;
- }
- }
- }
- static void raw_unlock(void * ptr, size_t len) {
- if (!VirtualUnlock(ptr, len)) {
- LLAMA_LOG_WARN("warning: failed to VirtualUnlock buffer: %s\n",
- llama_format_win_err(GetLastError()).c_str());
- }
- }
- #else
- static constexpr bool SUPPORTED = false;
- static size_t lock_granularity() {
- return (size_t) 65536;
- }
- bool raw_lock(const void * addr, size_t len) const {
- LLAMA_LOG_WARN("warning: mlock not supported on this system\n");
- return false;
- }
- static void raw_unlock(const void * addr, size_t len) {}
- #endif
- };
- using llama_mlocks = std::vector<std::unique_ptr<llama_mlock>>;
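grow_to() rounds the requested size up to the lock granularity with the usual power-of-two trick. A minimal sketch of just that arithmetic, with made-up numbers:

#include <cstddef>
#include <cstdio>

// round n up to a multiple of `granularity`, which must be a power of two
static size_t round_up(size_t n, size_t granularity) {
    return (n + granularity - 1) & ~(granularity - 1);
}

int main() {
    std::printf("%zu\n", round_up(5000, 4096));   // 8192
    std::printf("%zu\n", round_up(8192, 4096));   // 8192 (already aligned)
    return 0;
}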
- // NOTE: avoid ever using this except for building the token_to_piece caches
- static std::string llama_token_to_piece(const struct llama_model * model, llama_token token, bool special) {
- std::string piece;
- piece.resize(piece.capacity()); // using string internal cache
- const int n_chars = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special);
- if (n_chars < 0) {
- piece.resize(-n_chars);
- int check = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special);
- GGML_ASSERT(check == -n_chars);
- }
- else {
- piece.resize(n_chars);
- }
- return piece;
- }
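The helper above relies on a common two-call contract: the first call may return a negative value whose magnitude is the required buffer size, in which case the buffer is grown and the call repeated. Below is a hedged, self-contained illustration of the same resize-and-retry pattern; `fake_render` is a stand-in invented for this sketch, not a llama.cpp API.

#include <cassert>
#include <cstdio>
#include <cstring>
#include <string>

// stand-in API invented for this sketch: copies src into buf if it fits,
// otherwise returns the negated required size without writing anything
static int fake_render(const char * src, char * buf, int buf_size) {
    const int need = (int) std::strlen(src);
    if (need > buf_size) {
        return -need;
    }
    std::memcpy(buf, src, need);
    return need;
}

static std::string render_to_string(const char * src) {
    std::string piece;
    piece.resize(piece.capacity());                          // reuse whatever storage the string already has
    int n = fake_render(src, &piece[0], (int) piece.size());
    if (n < 0) {
        piece.resize(-n);                                    // grow to the size the callee asked for
        const int check = fake_render(src, &piece[0], (int) piece.size());
        assert(check == -n);
        n = check;
    }
    piece.resize(n);
    return piece;
}

int main() {
    std::printf("%s\n", render_to_string("a piece longer than a short string's initial capacity").c_str());
    return 0;
}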
- static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(bool host_buffer) {
- ggml_backend_buffer_type_t buft = nullptr;
- #if defined(GGML_USE_CUDA)
- // host buffers should only be used when data is expected to be copied to/from the GPU
- if (host_buffer) {
- buft = ggml_backend_cuda_host_buffer_type();
- }
- #elif defined(GGML_USE_SYCL)
- if (host_buffer) {
- buft = ggml_backend_sycl_host_buffer_type();
- }
- #elif defined(GGML_USE_CANN)
- if (host_buffer) {
- buft = ggml_backend_cann_host_buffer_type();
- }
- #elif defined(GGML_USE_CPU_HBM)
- buft = ggml_backend_cpu_hbm_buffer_type();
- #elif defined(GGML_USE_VULKAN)
- if (host_buffer) {
- buft = ggml_backend_vk_host_buffer_type();
- }
- #endif
- if (buft == nullptr) {
- buft = ggml_backend_cpu_buffer_type();
- }
- return buft;
- GGML_UNUSED(host_buffer);
- }
- //
- // globals
- //
- struct llama_state {
- llama_state() {
- #ifdef GGML_USE_METAL
- ggml_backend_metal_log_set_callback(log_callback, log_callback_user_data);
- #elif defined(GGML_USE_CUDA)
- ggml_backend_cuda_log_set_callback(log_callback, log_callback_user_data);
- #elif defined(GGML_USE_CANN)
- ggml_backend_cann_log_set_callback(log_callback, log_callback_user_data);
- #endif
- }
- // We save the log callback globally
- ggml_log_callback log_callback = llama_log_callback_default;
- void * log_callback_user_data = nullptr;
- };
- static llama_state g_state;
- // available llama models
- enum e_model {
- MODEL_UNKNOWN,
- MODEL_14M,
- MODEL_17M,
- MODEL_22M,
- MODEL_33M,
- MODEL_60M,
- MODEL_70M,
- MODEL_80M,
- MODEL_109M,
- MODEL_137M,
- MODEL_160M,
- MODEL_220M,
- MODEL_250M,
- MODEL_270M,
- MODEL_335M,
- MODEL_410M,
- MODEL_450M,
- MODEL_770M,
- MODEL_780M,
- MODEL_0_5B,
- MODEL_1B,
- MODEL_1_3B,
- MODEL_1_4B,
- MODEL_1_6B,
- MODEL_2B,
- MODEL_2_8B,
- MODEL_3B,
- MODEL_4B,
- MODEL_6B,
- MODEL_6_9B,
- MODEL_7B,
- MODEL_8B,
- MODEL_9B,
- MODEL_11B,
- MODEL_12B,
- MODEL_13B,
- MODEL_14B,
- MODEL_15B,
- MODEL_16B,
- MODEL_20B,
- MODEL_22B,
- MODEL_30B,
- MODEL_34B,
- MODEL_35B,
- MODEL_40B,
- MODEL_65B,
- MODEL_70B,
- MODEL_90B,
- MODEL_236B,
- MODEL_314B,
- MODEL_SMALL,
- MODEL_MEDIUM,
- MODEL_LARGE,
- MODEL_XL,
- MODEL_A1_7B,
- MODEL_A2_7B,
- MODEL_8x7B,
- MODEL_8x22B,
- MODEL_16x12B,
- MODEL_10B_128x3_66B,
- MODEL_57B_A14B,
- MODEL_27B,
- };
- static const size_t kiB = 1024;
- static const size_t MiB = 1024*kiB;
- static const size_t GiB = 1024*MiB;
- struct llama_hparams {
- bool vocab_only;
- bool rope_finetuned;
- bool use_par_res;
- bool swin_norm;
- uint32_t n_vocab;
- uint32_t n_ctx_train; // context size the model was trained on
- uint32_t n_embd;
- uint32_t n_layer;
- uint32_t n_rot;
- uint32_t n_swa = 0; // sliding window attention (SWA)
- uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
- uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
- uint32_t n_expert = 0;
- uint32_t n_expert_used = 0;
- uint32_t n_vocab_type = 0; // for BERT-style token types
- uint32_t n_rel_attn_bkts = 0;
- std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_arr;
- std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_kv_arr;
- std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr;
- std::array<std::array<uint32_t, LLAMA_MAX_LAYERS>, 4> n_bskcn_arr;
- std::array<uint32_t, LLAMA_MAX_LAYERS> cross_attn_layers;
- uint32_t n_layer_dense_lead = 0;
- uint32_t n_lora_q = 0;
- uint32_t n_lora_kv = 0;
- uint32_t n_ff_exp = 0;
- uint32_t n_ff_shexp = 0;
- uint32_t n_expert_shared = 0;
- float expert_weights_scale = 0.0;
- float f_norm_eps;
- float f_norm_rms_eps;
- float f_attn_logit_softcapping = 50.0f;
- float f_final_logit_softcapping = 30.0f;
- // for RWKV
- uint32_t rescale_every_n_layers = 0;
- uint32_t time_mix_extra_dim = 0;
- uint32_t time_decay_extra_dim = 0;
- uint32_t wkv_head_size = 0;
- float rope_attn_factor = 1.0f;
- float rope_freq_base_train;
- float rope_freq_scale_train;
- uint32_t n_ctx_orig_yarn;
- float rope_yarn_log_mul;
- // for State Space Models
- uint32_t ssm_d_conv = 0;
- uint32_t ssm_d_inner = 0;
- uint32_t ssm_d_state = 0;
- uint32_t ssm_dt_rank = 0;
- bool ssm_dt_b_c_rms = false;
- float f_clamp_kqv = 0.0f;
- float f_max_alibi_bias = 0.0f;
- float f_logit_scale = 0.0f;
- // Additional scale factors (Granite/Granite MoE)
- float f_residual_scale = 0.0f;
- float f_embedding_scale = 0.0f;
- float f_attention_scale = 0.0f;
- bool causal_attn = true;
- bool use_alibi = false;
- bool attn_soft_cap = false;
- // needed by encoder-decoder models (e.g. T5, FLAN-T5)
- // ref: https://github.com/ggerganov/llama.cpp/pull/8141
- llama_token dec_start_token_id = -1;
- enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_NONE;
- enum llama_rope_type rope_type = LLAMA_ROPE_TYPE_NONE;
- enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;
- bool operator!=(const llama_hparams & other) const {
- if (this->vocab_only != other.vocab_only) return true;
- if (this->n_vocab != other.n_vocab) return true;
- if (this->n_ctx_train != other.n_ctx_train) return true;
- if (this->n_embd != other.n_embd) return true;
- if (this->n_layer != other.n_layer) return true;
- if (this->n_rot != other.n_rot) return true;
- if (this->n_swa != other.n_swa) return true;
- if (this->n_embd_head_k != other.n_embd_head_k) return true;
- if (this->n_embd_head_v != other.n_embd_head_v) return true;
- if (this->n_expert != other.n_expert) return true;
- if (this->n_expert_used != other.n_expert_used) return true;
- if (this->n_head_arr != other.n_head_arr) return true;
- if (this->n_head_kv_arr != other.n_head_kv_arr) return true;
- if (this->n_ff_arr != other.n_ff_arr) return true;
- if (this->n_bskcn_arr != other.n_bskcn_arr) return true;
- if (this->cross_attn_layers != other.cross_attn_layers) return true;
- if (this->n_rel_attn_bkts != other.n_rel_attn_bkts) return true;
- if (this->n_layer_dense_lead != other.n_layer_dense_lead) return true;
- if (this->n_lora_q != other.n_lora_q) return true;
- if (this->n_lora_kv != other.n_lora_kv) return true;
- if (this->n_ff_exp != other.n_ff_exp) return true;
- if (this->n_ff_shexp != other.n_ff_shexp) return true;
- if (this->n_expert_shared != other.n_expert_shared) return true;
- if (this->rope_finetuned != other.rope_finetuned) return true;
- if (this->n_ctx_orig_yarn != other.n_ctx_orig_yarn) return true;
- if (this->ssm_d_conv != other.ssm_d_conv) return true;
- if (this->ssm_d_inner != other.ssm_d_inner) return true;
- if (this->ssm_d_state != other.ssm_d_state) return true;
- if (this->ssm_dt_rank != other.ssm_dt_rank) return true;
- if (this->ssm_dt_b_c_rms != other.ssm_dt_b_c_rms) return true;
- if (this->rescale_every_n_layers != other.rescale_every_n_layers) return true;
- if (this->time_mix_extra_dim != other.time_mix_extra_dim) return true;
- if (this->time_decay_extra_dim != other.time_decay_extra_dim) return true;
- if (this->wkv_head_size != other.wkv_head_size) return true;
- if (this->dec_start_token_id != other.dec_start_token_id) return true;
- const float EPSILON = 1e-9f;
- if (!is_float_close(this->f_norm_eps, other.f_norm_eps, EPSILON)) return true;
- if (!is_float_close(this->f_norm_rms_eps, other.f_norm_rms_eps, EPSILON)) return true;
- if (!is_float_close(this->rope_attn_factor, other.rope_attn_factor, EPSILON)) return true;
- if (!is_float_close(this->rope_freq_base_train, other.rope_freq_base_train, EPSILON)) return true;
- if (!is_float_close(this->rope_freq_scale_train, other.rope_freq_scale_train, EPSILON)) return true;
- if (!is_float_close(this->expert_weights_scale, other.expert_weights_scale, EPSILON)) return true;
- if (!is_float_close(this->rope_yarn_log_mul, other.rope_yarn_log_mul, EPSILON)) return true;
- if (!is_float_close(this->f_residual_scale, other.f_residual_scale, EPSILON)) return true;
- if (!is_float_close(this->f_embedding_scale, other.f_embedding_scale, EPSILON)) return true;
- if (!is_float_close(this->f_attention_scale, other.f_attention_scale, EPSILON)) return true;
- return false;
- }
- uint32_t n_head(uint32_t il = 0) const {
- if (il < n_layer) {
- return n_head_arr[il];
- }
- GGML_ABORT("fatal error");
- }
- uint32_t n_head_kv(uint32_t il = 0) const {
- if (il < n_layer) {
- return n_head_kv_arr[il];
- }
- GGML_ABORT("fatal error");
- }
- uint32_t n_ff(uint32_t il = 0) const {
- if (il < n_layer) {
- return n_ff_arr[il];
- }
- GGML_ABORT("fatal error");
- }
- uint32_t n_gqa(uint32_t il = 0) const {
- const uint32_t n_head = this->n_head(il);
- const uint32_t n_head_kv = this->n_head_kv(il);
- if (n_head_kv == 0) {
- return 0;
- }
- return n_head/n_head_kv;
- }
- uint32_t n_embd_k_gqa(uint32_t il = 0) const { // dimension of key embeddings across all k-v heads
- const uint32_t n_head_kv = this->n_head_kv(il);
- return n_embd_head_k * n_head_kv;
- }
- uint32_t n_embd_v_gqa(uint32_t il = 0) const { // dimension of value embeddings across all k-v heads
- const uint32_t n_head_kv = this->n_head_kv(il);
- return n_embd_head_v * n_head_kv;
- }
- uint32_t n_embd_k_s() const { // dimension of the rolling state embeddings
- // corresponds to Mamba's conv_states size or RWKV's token_shift states size
- if (wkv_head_size != 0) {
- // for RWKV models
- return 2 * n_embd;
- } else {
- // TODO: maybe support other convolution strides than 1
- // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed
- return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner;
- }
- }
- uint32_t n_embd_v_s() const { // dimension of the recurrent state embeddings
- if (wkv_head_size != 0) {
- // corresponds to RWKV's wkv_states size
- return n_embd * wkv_head_size;
- } else {
- // corresponds to Mamba's ssm_states size
- return ssm_d_state * ssm_d_inner;
- }
- }
- bool n_bskcn(uint32_t n, uint32_t il = 0) const {
- if (il < n_layer) {
- return n_bskcn_arr[n][il] > 0;
- }
- GGML_ABORT("fatal error");
- }
- bool cross_attention_layers(uint32_t il) const {
- return std::find(cross_attn_layers.begin(), cross_attn_layers.end(), il) != cross_attn_layers.end();
- }
- };
- static_assert(std::is_trivially_copyable<llama_hparams>::value, "llama_hparams must be trivially copyable");
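To make the accessors above concrete, here is a small arithmetic sketch of how grouped-query attention sizes fall out of them. The hyperparameters (32 query heads, 8 KV heads, 128-dim heads) are made up for illustration.

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t n_head        = 32;   // query heads
    const uint32_t n_head_kv     = 8;    // key/value heads
    const uint32_t n_embd_head_k = 128;  // per-head key dimension
    const uint32_t n_embd_head_v = 128;  // per-head value dimension

    const uint32_t n_gqa        = n_head / n_head_kv;        // 4 query heads share each KV head
    const uint32_t n_embd_k_gqa = n_embd_head_k * n_head_kv; // key row per token: 1024
    const uint32_t n_embd_v_gqa = n_embd_head_v * n_head_kv; // value row per token: 1024

    std::printf("n_gqa=%u n_embd_k_gqa=%u n_embd_v_gqa=%u\n", n_gqa, n_embd_k_gqa, n_embd_v_gqa);
    return 0;
}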
- struct llama_cparams {
- uint32_t n_ctx; // context size used during inference
- uint32_t n_batch;
- uint32_t n_ubatch;
- uint32_t n_seq_max;
- int n_threads; // number of threads to use for generation
- int n_threads_batch; // number of threads to use for batch processing
- float rope_freq_base;
- float rope_freq_scale;
- uint32_t n_ctx_orig_yarn;
- // These hyperparameters are not exposed in GGUF, because all
- // existing YaRN models use the same values for them.
- float yarn_ext_factor;
- float yarn_attn_factor;
- float yarn_beta_fast;
- float yarn_beta_slow;
- float defrag_thold;
- bool embeddings;
- bool causal_attn;
- bool offload_kqv;
- bool flash_attn;
- bool no_perf;
- // TODO (jmorganca): this should most likely be passed in as part of a batch
- // and not set on the context for all batches.
- bool cross_attn = false;
- enum llama_pooling_type pooling_type;
- ggml_backend_sched_eval_callback cb_eval;
- void * cb_eval_user_data;
- };
- // TODO: separate into "llama_layer_enc" and "llama_layer_dec"
- struct llama_layer {
- // normalization
- struct ggml_tensor * attn_norm;
- struct ggml_tensor * attn_norm_b;
- struct ggml_tensor * attn_norm_2;
- struct ggml_tensor * attn_norm_2_b;
- struct ggml_tensor * attn_q_norm;
- struct ggml_tensor * attn_q_norm_b;
- struct ggml_tensor * attn_k_norm;
- struct ggml_tensor * attn_k_norm_b;
- struct ggml_tensor * attn_out_norm;
- struct ggml_tensor * attn_out_norm_b;
- struct ggml_tensor * attn_q_a_norm;
- struct ggml_tensor * attn_kv_a_norm;
- struct ggml_tensor * attn_sub_norm;
- struct ggml_tensor * attn_post_norm;
- struct ggml_tensor * ffn_sub_norm;
- struct ggml_tensor * attn_norm_cross;
- struct ggml_tensor * attn_norm_enc;
- // attention
- struct ggml_tensor * wq;
- struct ggml_tensor * wk;
- struct ggml_tensor * wv;
- struct ggml_tensor * wo;
- struct ggml_tensor * wqkv;
- struct ggml_tensor * wq_a;
- struct ggml_tensor * wq_b;
- struct ggml_tensor * wkv_a_mqa;
- struct ggml_tensor * wkv_b;
- struct ggml_tensor * wq_cross;
- struct ggml_tensor * wk_cross;
- struct ggml_tensor * wv_cross;
- struct ggml_tensor * wo_cross;
- struct ggml_tensor * wq_enc;
- struct ggml_tensor * wk_enc;
- struct ggml_tensor * wv_enc;
- struct ggml_tensor * wo_enc;
- // attention bias
- struct ggml_tensor * bq;
- struct ggml_tensor * bk;
- struct ggml_tensor * bv;
- struct ggml_tensor * bo;
- struct ggml_tensor * bqkv;
- // relative position bias
- struct ggml_tensor * attn_rel_b;
- struct ggml_tensor * attn_rel_b_enc;
- struct ggml_tensor * attn_rel_b_cross;
- // normalization
- struct ggml_tensor * ffn_norm;
- struct ggml_tensor * ffn_norm_b;
- struct ggml_tensor * ffn_post_norm;
- struct ggml_tensor * layer_out_norm;
- struct ggml_tensor * layer_out_norm_b;
- struct ggml_tensor * ffn_norm_exps;
- struct ggml_tensor * ffn_norm_enc;
- // ff
- struct ggml_tensor * ffn_gate; // w1
- struct ggml_tensor * ffn_down; // w2
- struct ggml_tensor * ffn_up; // w3
- struct ggml_tensor * ffn_gate_enc;
- struct ggml_tensor * ffn_down_enc;
- struct ggml_tensor * ffn_up_enc;
- // ff MoE
- struct ggml_tensor * ffn_gate_inp;
- struct ggml_tensor * ffn_gate_exps;
- struct ggml_tensor * ffn_down_exps;
- struct ggml_tensor * ffn_up_exps;
- // ff shared expert (shexp)
- struct ggml_tensor * ffn_gate_inp_shexp;
- struct ggml_tensor * ffn_gate_shexp;
- struct ggml_tensor * ffn_down_shexp;
- struct ggml_tensor * ffn_up_shexp;
- // ff bias
- struct ggml_tensor * ffn_gate_b = nullptr;
- struct ggml_tensor * ffn_down_b = nullptr; // b2
- struct ggml_tensor * ffn_up_b = nullptr; // b3
- struct ggml_tensor * ffn_act;
- // mamba proj
- struct ggml_tensor * ssm_in;
- struct ggml_tensor * ssm_x;
- struct ggml_tensor * ssm_dt;
- struct ggml_tensor * ssm_out;
- // mamba
- struct ggml_tensor * ssm_conv1d;
- struct ggml_tensor * ssm_a;
- struct ggml_tensor * ssm_d;
- // mamba bias
- struct ggml_tensor * ssm_conv1d_b;
- struct ggml_tensor * ssm_dt_b;
- // rwkv
- struct ggml_tensor * time_mix_w1;
- struct ggml_tensor * time_mix_w2;
- struct ggml_tensor * time_mix_lerp_x;
- struct ggml_tensor * time_mix_lerp_w;
- struct ggml_tensor * time_mix_lerp_k;
- struct ggml_tensor * time_mix_lerp_v;
- struct ggml_tensor * time_mix_lerp_r;
- struct ggml_tensor * time_mix_lerp_g;
- struct ggml_tensor * time_mix_first;
- struct ggml_tensor * time_mix_decay;
- struct ggml_tensor * time_mix_decay_w1;
- struct ggml_tensor * time_mix_decay_w2;
- struct ggml_tensor * time_mix_key;
- struct ggml_tensor * time_mix_value;
- struct ggml_tensor * time_mix_receptance;
- struct ggml_tensor * time_mix_gate;
- struct ggml_tensor * time_mix_ln;
- struct ggml_tensor * time_mix_ln_b;
- struct ggml_tensor * time_mix_output;
- struct ggml_tensor * channel_mix_lerp_k;
- struct ggml_tensor * channel_mix_lerp_r;
- struct ggml_tensor * channel_mix_key;
- struct ggml_tensor * channel_mix_receptance;
- struct ggml_tensor * channel_mix_value;
- // long rope factors
- struct ggml_tensor * rope_long = nullptr;
- struct ggml_tensor * rope_short = nullptr;
- struct ggml_tensor * rope_freqs = nullptr;
- // bitnet scale
- struct ggml_tensor * wq_scale;
- struct ggml_tensor * wk_scale;
- struct ggml_tensor * wv_scale;
- struct ggml_tensor * wo_scale;
- struct ggml_tensor * ffn_gate_scale;
- struct ggml_tensor * ffn_up_scale;
- struct ggml_tensor * ffn_down_scale;
- struct ggml_tensor * bskcn_tv;
- // cross attention
- struct ggml_tensor * cross_attn_k_norm;
- struct ggml_tensor * cross_attn_k_proj;
- struct ggml_tensor * cross_attn_o_proj;
- struct ggml_tensor * cross_attn_q_norm;
- struct ggml_tensor * cross_attn_q_proj;
- struct ggml_tensor * cross_attn_v_proj;
- struct ggml_tensor * cross_attn_attn_gate;
- struct ggml_tensor * cross_attn_mlp_gate;
- };
- // very similar to llama_batch,
- // but has more metadata about sequences
- struct llama_ubatch {
- bool equal_seqs;
- // TODO: whole_seqs for embeddings?
- uint32_t n_tokens; // total tokens (n_seq_tokens * n_seqs)
- uint32_t n_seq_tokens; // tokens per sequence
- uint32_t n_seqs;
- llama_token * token; // [n_tokens]
- float * embd; // [n_embd, n_tokens]
- llama_pos * pos; // [n_tokens]
- int32_t * n_seq_id; // [n_seqs]
- llama_seq_id ** seq_id; // [n_seqs]
- int8_t * output; // [n_tokens]
- };
- struct llama_kv_cell {
- llama_pos pos = -1;
- llama_pos delta = 0;
- int32_t src = -1; // used by recurrent state models to copy states
- int32_t tail = -1;
- std::set<llama_seq_id> seq_id;
- bool has_seq_id(const llama_seq_id & id) const {
- return seq_id.find(id) != seq_id.end();
- }
- bool is_empty() const {
- return seq_id.empty();
- }
- bool is_same_seq(const llama_kv_cell & other) const {
- return seq_id == other.seq_id;
- }
- };
- // ring-buffer of cached KV data
- struct llama_kv_cache {
- bool has_shift = false;
- bool do_defrag = false;
- bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token
- bool v_trans = true; // the value tensor is transposed
- // Note: The value of head isn't only used to optimize searching
- // for a free KV slot. llama_decode_internal also uses it, so it
- // cannot be freely changed after a slot has been allocated.
- uint32_t head = 0;
- uint32_t size = 0;
- uint32_t used = 0; // used cells (i.e. at least one seq_id)
- // computed before each graph build
- uint32_t n = 0;
- ggml_type type_k = GGML_TYPE_F16;
- ggml_type type_v = GGML_TYPE_F16;
- std::vector<llama_kv_cell> cells;
- std::vector<struct ggml_tensor *> k_l; // per layer
- std::vector<struct ggml_tensor *> v_l;
- std::vector<struct ggml_context *> ctxs;
- std::vector<ggml_backend_buffer_t> bufs;
- size_t total_size() const {
- size_t size = 0;
- for (ggml_backend_buffer_t buf : bufs) {
- size += ggml_backend_buffer_get_size(buf);
- }
- return size;
- }
- ~llama_kv_cache() {
- for (struct ggml_context * ctx : ctxs) {
- ggml_free(ctx);
- }
- for (ggml_backend_buffer_t buf : bufs) {
- ggml_backend_buffer_free(buf);
- }
- }
- };
- struct llama_control_vector {
- std::vector<struct ggml_tensor *> tensors; // per layer
- std::vector<struct ggml_context *> ctxs;
- std::vector<ggml_backend_buffer_t> bufs;
- int32_t layer_start = -1;
- int32_t layer_end = -1;
- struct ggml_tensor * tensor_for(int il) const {
- if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) {
- return nullptr;
- }
- return tensors[il];
- }
- struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const {
- ggml_tensor * layer_dir = tensor_for(il);
- if (layer_dir != nullptr) {
- cur = ggml_add(ctx, cur, layer_dir);
- }
- return cur;
- }
- ~llama_control_vector() {
- for (struct ggml_context * ctx : ctxs) {
- ggml_free(ctx);
- }
- for (ggml_backend_buffer_t buf : bufs) {
- ggml_backend_buffer_free(buf);
- }
- }
- };
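llama_control_vector::apply_to simply adds a per-layer direction tensor to the current hidden state when the layer is in range. As a plain-C++ analogue (no ggml; the type and the numbers are invented for the example), the same idea looks like this:

#include <cstdio>
#include <vector>

struct control_vector {
    std::vector<std::vector<float>> dirs;   // one steering direction per layer (may be empty)
    int layer_start = -1;
    int layer_end   = -1;

    const std::vector<float> * dir_for(int il) const {
        if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= dirs.size() || dirs[il].empty()) {
            return nullptr;
        }
        return &dirs[il];
    }

    // add the layer's direction to the hidden state, if that layer has one
    void apply_to(std::vector<float> & hidden, int il) const {
        if (const std::vector<float> * d = dir_for(il)) {
            for (size_t i = 0; i < hidden.size() && i < d->size(); ++i) {
                hidden[i] += (*d)[i];
            }
        }
    }
};

int main() {
    control_vector cv;
    cv.dirs        = {{}, {0.5f, -0.5f}};   // only layer 1 carries a direction
    cv.layer_start = 1;
    cv.layer_end   = 1;

    std::vector<float> h = {1.0f, 2.0f};
    cv.apply_to(h, 1);
    std::printf("%.1f %.1f\n", h[0], h[1]);   // prints 1.5 1.5
    return 0;
}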
- struct llama_model {
- e_model type = MODEL_UNKNOWN;
- llm_arch arch = LLM_ARCH_UNKNOWN;
- llama_ftype ftype = LLAMA_FTYPE_ALL_F32;
- std::string name = "n/a";
- llama_hparams hparams = {};
- llama_vocab vocab;
- // TODO: should init all tensors to nullptr
- struct ggml_tensor * tok_embd;
- struct ggml_tensor * type_embd;
- struct ggml_tensor * pos_embd;
- struct ggml_tensor * tok_norm;
- struct ggml_tensor * tok_norm_b;
- struct ggml_tensor * output_norm;
- struct ggml_tensor * output_norm_b;
- struct ggml_tensor * output;
- struct ggml_tensor * output_b;
- struct ggml_tensor * output_norm_enc;
- // classifier
- struct ggml_tensor * cls;
- struct ggml_tensor * cls_b;
- struct ggml_tensor * cls_out = nullptr;
- struct ggml_tensor * cls_out_b = nullptr;
- std::vector<llama_layer> layers;
- llama_split_mode split_mode;
- int main_gpu;
- int n_gpu_layers;
- std::vector<std::string> rpc_servers;
- // gguf metadata
- std::unordered_map<std::string, std::string> gguf_kv;
- // layer -> buffer type mapping
- struct layer_buft {
- layer_buft() : buft_matrix(nullptr), buft(nullptr) {}
- layer_buft(ggml_backend_buffer_type_t matrix) : buft_matrix(matrix), buft(matrix) {}
- layer_buft(ggml_backend_buffer_type_t matrix, ggml_backend_buffer_type_t other) : buft_matrix(matrix), buft(other) {}
- ggml_backend_buffer_type_t buft_matrix; // matrices only - used by split buffers and backends that support only matrix multiplication
- ggml_backend_buffer_type_t buft; // everything else
- };
- layer_buft buft_input;
- layer_buft buft_output;
- std::vector<layer_buft> buft_layer;
- // contexts where the model tensors metadata is stored
- std::vector<struct ggml_context *> ctxs;
- // the model memory buffers for the tensor data
- std::vector<ggml_backend_buffer_t> bufs;
- // model memory mapped files
- llama_mmaps mappings;
- // objects representing data potentially being locked in memory
- llama_mlocks mlock_bufs;
- llama_mlocks mlock_mmaps;
- // for quantize-stats only
- std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;
- int64_t t_load_us = 0;
- int64_t t_start_us = 0;
- // keep track of loaded lora adapters
- std::set<struct llama_lora_adapter *> lora_adapters;
- ~llama_model() {
- for (struct ggml_context * ctx : ctxs) {
- ggml_free(ctx);
- }
- for (ggml_backend_buffer_t buf : bufs) {
- #ifdef GGML_USE_CUDA
- if (ggml_backend_buffer_get_type(buf) == ggml_backend_cpu_buffer_type()) {
- ggml_backend_cuda_unregister_host_buffer(ggml_backend_buffer_get_base(buf));
- }
- #endif
- ggml_backend_buffer_free(buf);
- }
- while (!lora_adapters.empty()) {
- llama_lora_adapter_free(*lora_adapters.begin());
- }
- }
- };
- struct llama_sbatch_seq {
- int32_t n_seq_id;
- llama_seq_id * seq_id;
- size_t offset;
- size_t length;
- // helper for smoother batch API transition -- can be deprecated in the future
- llama_seq_id all_seq_id; // used if seq_id == NULL
- };
- // sequence-length-aware batch splitting
- struct llama_sbatch {
- // tokens left in this batch
- size_t n_tokens;
- size_t n_embd;
- bool logits_all; // TODO: remove once lctx.logits_all is removed too
- // sorted indices into the batch
- std::vector<size_t> ids;
- // batch indices of the output
- std::vector<size_t> out_ids;
- std::vector<llama_sbatch_seq> seq;
- const llama_batch * batch = nullptr;
- // buffers for the ubatch
- std::vector<llama_token> ubatch_token;
- std::vector<float> ubatch_embd;
- std::vector<llama_pos> ubatch_pos;
- std::vector<int32_t> ubatch_n_seq_id;
- std::vector<llama_seq_id *> ubatch_seq_id;
- std::vector<int8_t> ubatch_output;
- llama_ubatch reserve_ubatch(size_t n_ubatch, bool has_embd = false) {
- // clear empty sequences
- // the previous ubatch is assumed to be gone,
- // so nothing should refer to values in these sequences anymore.
- for (size_t i = seq.size(); i-- > 0;) {
- if (seq[i].length == 0) {
- seq.pop_back();
- } else {
- break;
- }
- }
- ubatch_token.resize(!has_embd ? n_ubatch : 0);
- ubatch_embd.resize(has_embd ? n_embd * n_ubatch : 0);
- ubatch_pos.resize(n_ubatch);
- ubatch_n_seq_id.resize(n_ubatch);
- ubatch_seq_id.resize(n_ubatch);
- ubatch_output.resize(n_ubatch);
- llama_ubatch ubatch = {
- /*equal_seqs =*/ true,
- /*n_tokens =*/ 0,
- /*n_seq_tokens =*/ 0,
- /*n_seqs =*/ 0,
- /*token =*/ !has_embd ? ubatch_token.data() : nullptr,
- /*embd =*/ has_embd ? ubatch_embd.data() : nullptr,
- /*pos =*/ ubatch_pos.data(),
- /*n_seq_id =*/ ubatch_n_seq_id.data(),
- /*seq_id =*/ ubatch_seq_id.data(),
- /*output =*/ ubatch_output.data(),
- };
- return ubatch;
- }
- void add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length) {
- GGML_ASSERT(batch != nullptr);
- GGML_ASSERT(length <= seq.length);
- // Can only add sequences of equal lengths to a batch,
- // otherwise it isn't clear to which sequence a token belongs
- GGML_ASSERT(seq.n_seq_id == 0 || ubatch.n_seqs == 0 || length == (size_t) ubatch.n_tokens / ubatch.n_seqs);
- GGML_ASSERT((seq.n_seq_id != 0) == ubatch.equal_seqs);
- // NOTE: loops are separated for cache-friendliness
- if (batch->token) {
- if (ubatch.equal_seqs) {
- for (size_t i = 0; i < length; ++i) {
- ubatch.token[ubatch.n_tokens + i] = batch->token[ids[seq.offset + i]];
- }
- } else {
- // simple split
- ubatch.token = batch->token + seq.offset;
- }
- } else {
- ubatch.token = nullptr;
- }
- if (batch->embd) {
- if (ubatch.equal_seqs) {
- for (size_t i = 0; i < length; ++i) {
- memcpy(
- ubatch.embd + n_embd * (ubatch.n_tokens + i),
- batch->embd + n_embd * ids[seq.offset + i],
- n_embd * sizeof(float)
- );
- }
- } else {
- // simple split
- ubatch.embd = batch->embd + (n_embd * seq.offset);
- }
- } else {
- ubatch.embd = nullptr;
- }
- // from here on, the else branches are deprecated;
- // they are helpers for smoother batch API transition
- if (batch->pos) {
- if (ubatch.equal_seqs) {
- for (size_t i = 0; i < length; ++i) {
- ubatch.pos[ubatch.n_tokens + i] = batch->pos[ids[seq.offset + i]];
- }
- } else {
- // simple split
- ubatch.pos = batch->pos + seq.offset;
- }
- } else {
- for (size_t i = 0; i < length; ++i) {
- llama_pos bi = ids[seq.offset + i];
- ubatch.pos[ubatch.n_tokens + i] = batch->all_pos_0 + (bi * batch->all_pos_1);
- }
- }
- if (ubatch.equal_seqs) {
- ubatch.n_seq_id[ubatch.n_seqs] = seq.n_seq_id;
- if (seq.seq_id) {
- ubatch.seq_id[ubatch.n_seqs] = seq.seq_id;
- } else {
- GGML_ASSERT(seq.n_seq_id == 1);
- ubatch.seq_id[ubatch.n_seqs] = &seq.all_seq_id;
- }
- } else {
- // simple split
- if (batch->n_seq_id) {
- ubatch.n_seq_id = batch->n_seq_id + seq.offset;
- } else {
- for (size_t i = 0; i < length; ++i) {
- ubatch.n_seq_id[ubatch.n_seqs + i] = 1;
- }
- }
- if (batch->seq_id) {
- ubatch.seq_id = batch->seq_id + seq.offset;
- } else {
- for (size_t i = 0; i < length; ++i) {
- ubatch.seq_id[ubatch.n_seqs + i] = &seq.all_seq_id;
- }
- }
- }
- if (logits_all) {
- for (size_t i = 0; i < length; ++i) {
- ubatch.output[ubatch.n_tokens + i] = 1;
- out_ids.push_back(ids[seq.offset + i]);
- }
- } else if (batch->logits) {
- if (ubatch.equal_seqs) {
- for (size_t i = 0; i < length; ++i) {
- size_t id = ids[seq.offset + i];
- int8_t is_output = batch->logits[id];
- ubatch.output[ubatch.n_tokens + i] = is_output;
- if (is_output) { out_ids.push_back(id); }
- }
- } else {
- // simple split
- ubatch.output = batch->logits + seq.offset;
- for (size_t i = 0; i < length; ++i) {
- if (ubatch.output[i] != 0) { out_ids.push_back(seq.offset + i); }
- }
- }
- } else {
- // only get last output
- for (size_t i = 0; i < length; ++i) {
- size_t id = ids[seq.offset + i];
- int8_t is_last = id == ids.size() - 1;
- ubatch.output[ubatch.n_tokens + i] = is_last;
- if (is_last) { out_ids.push_back(id); }
- }
- }
- if (ubatch.n_tokens == 0 && ubatch.n_seqs == 0) {
- ubatch.n_seq_tokens = ubatch.equal_seqs ? length : 1;
- }
- ubatch.n_tokens += length;
- ubatch.n_seqs += ubatch.equal_seqs ? 1 : length; // virtual sequences for simple splits
- seq.offset += length;
- seq.length -= length;
- n_tokens -= length;
- GGML_ASSERT(ubatch.n_tokens == ubatch.n_seq_tokens * ubatch.n_seqs);
- }
- // simple split, unknown number of sequences of unequal lengths
- llama_ubatch split_simple(size_t n_ubatch) {
- n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
- llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
- ubatch.equal_seqs = false;
- if (!seq.empty()) {
- llama_sbatch_seq & s = seq[0];
- size_t length = s.length < n_ubatch ? s.length : n_ubatch;
- GGML_ASSERT(seq.size() == 1 && s.n_seq_id == 0); // don't mix with other splits
- add_seq_to_ubatch(ubatch, s, length);
- }
- return ubatch;
- }
- // make batches of equal-length sequences
- llama_ubatch split_equal(size_t n_ubatch) {
- n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
- llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
- if (!seq.empty()) {
- size_t length = 0;
- size_t n_tokens_in_ubatch = 0;
- GGML_ASSERT(seq[0].n_seq_id > 0); // should not be mixed with simple splits
- // smallest first, because it's easier to split this way;
- // starting from the end to pop in constant time.
- for (size_t i = seq.size(); i-- > 0;) {
- llama_sbatch_seq & s = seq[i];
- GGML_ASSERT(s.length > 0);
- if (length == 0) {
- length = s.length < n_ubatch ? s.length : n_ubatch;
- }
- add_seq_to_ubatch(ubatch, s, length);
- n_tokens_in_ubatch += length;
- // shared prompts can't be mixed with any of their sequences,
- // so it's safer to compute them in their own ubatch
- if (s.n_seq_id > 1) { break; }
- // stop when there isn't enough space for another sequence
- if (length + n_tokens_in_ubatch > n_ubatch) { break; }
- }
- }
- return ubatch;
- }
- // sequence-wise split
- llama_ubatch split_seq(size_t n_ubatch) {
- n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
- llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
- if (!seq.empty()) {
- llama_sbatch_seq & s = seq[seq.size() - 1];
- size_t length = s.length < n_ubatch ? s.length : n_ubatch;
- GGML_ASSERT(s.n_seq_id > 0); // should not be mixed with simple splits
- add_seq_to_ubatch(ubatch, s, length);
- }
- return ubatch;
- }
- void from_batch(const llama_batch & batch, const size_t n_embd, const bool simple_split = false, const bool logits_all = false) {
- GGML_ASSERT(batch.n_tokens >= 0);
- this->batch = &batch;
- this->n_embd = n_embd;
- this->logits_all = logits_all;
- n_tokens = batch.n_tokens;
- ids.resize(n_tokens);
- out_ids.clear();
- // TODO: reserve out_ids and seq
- for (size_t i = 0; i < n_tokens; ++i) {
- ids[i] = i;
- }
- if (simple_split) {
- seq.resize(1);
- llama_sbatch_seq & s = seq[0];
- s.n_seq_id = 0;
- s.seq_id = nullptr;
- s.offset = 0;
- s.length = n_tokens;
- s.all_seq_id = batch.all_seq_id;
- return;
- }
- std::sort(ids.begin(), ids.end(),
- [&batch](size_t a, size_t b) {
- int32_t n_seq_a = batch.n_seq_id ? batch.n_seq_id[a] : 1;
- int32_t n_seq_b = batch.n_seq_id ? batch.n_seq_id[b] : 1;
- // sort by seq_id, then by pos
- if (n_seq_a == n_seq_b) {
- if (batch.seq_id) {
- for (int32_t i = 0; i < n_seq_a; ++i) {
- llama_seq_id seq_id_a = batch.seq_id[a][i];
- llama_seq_id seq_id_b = batch.seq_id[b][i];
- // smaller seq_ids go first
- if (seq_id_a != seq_id_b) {
- return seq_id_a < seq_id_b;
- }
- }
- }
- // when all else is equal, sort by pos
- if (batch.pos) {
- return batch.pos[a] < batch.pos[b];
- }
- // no pos, sort by id (assuming batch.all_pos_1 is positive)
- return a < b;
- }
- // shared prompts go first
- return n_seq_a > n_seq_b;
- }
- );
- // init seq
- llama_sbatch_seq * last_seq = nullptr;
- if (batch.n_seq_id != nullptr && batch.seq_id != nullptr) {
- for (size_t i = 0; i < n_tokens; ++i) {
- const size_t bi = ids[i];
- const int32_t n_seqs = batch.n_seq_id[bi];
- llama_seq_id * seq_ids = batch.seq_id[bi];
- if (last_seq != nullptr) {
- bool same = n_seqs == last_seq->n_seq_id;
- for (int32_t j = 0; same && j < n_seqs; ++j) {
- if (seq_ids[j] != last_seq->seq_id[j]) {
- same = false;
- }
- }
- if (same) {
- last_seq->length += 1;
- continue;
- }
- }
- llama_sbatch_seq new_seq = {n_seqs, seq_ids, i, 1, batch.all_seq_id};
- seq.push_back(new_seq);
- last_seq = &seq.back();
- }
- } else {
- llama_sbatch_seq new_seq = {1, nullptr, 0, n_tokens, batch.all_seq_id};
- seq.push_back(new_seq);
- }
- // sort so that shared prompts end up at the back of the vector (sequences are popped from the end, so they are processed first), then by length descending (shortest last, i.e. split first).
- std::sort(seq.begin(), seq.end(),
- [](llama_sbatch_seq & a, llama_sbatch_seq & b) {
- if (a.n_seq_id == b.n_seq_id) {
- return a.length > b.length;
- }
- return a.n_seq_id < b.n_seq_id;
- }
- );
- }
- };
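The core of from_batch() is the index sort: tokens that belong to more sequences (shared prompts) come first, then ties are broken by seq_id and position. A standalone sketch of that comparator over made-up per-token metadata:

#include <algorithm>
#include <cstdio>
#include <vector>

struct tok { int n_seq; int seq_id; int pos; };   // made-up per-token metadata

int main() {
    const std::vector<tok> batch = {
        {1, 0, 2}, {2, 0, 0}, {1, 1, 0}, {1, 0, 1}, {2, 0, 1},
    };

    std::vector<size_t> ids(batch.size());
    for (size_t i = 0; i < ids.size(); ++i) { ids[i] = i; }

    std::sort(ids.begin(), ids.end(), [&](size_t a, size_t b) {
        if (batch[a].n_seq != batch[b].n_seq) { return batch[a].n_seq > batch[b].n_seq; }     // shared prompts first
        if (batch[a].seq_id != batch[b].seq_id) { return batch[a].seq_id < batch[b].seq_id; } // then by seq_id
        return batch[a].pos < batch[b].pos;                                                   // then by position
    });

    for (size_t id : ids) { std::printf("%zu ", id); }   // prints: 1 4 3 0 2
    std::printf("\n");
    return 0;
}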
- struct llama_context {
- llama_context(const llama_model & model)
- : model(model)
- , t_start_us(model.t_start_us)
- , t_load_us(model.t_load_us) {}
- ~llama_context() {
- ggml_backend_sched_free(sched);
- for (ggml_backend_t backend : backends) {
- ggml_backend_free(backend);
- }
- ggml_backend_buffer_free(buf_output);
- }
- const struct llama_model & model;
- struct llama_cparams cparams;
- struct llama_sbatch sbatch;
- struct llama_kv_cache kv_self;
- struct llama_control_vector cvec;
- std::unordered_map<struct llama_lora_adapter *, float> lora_adapters;
- std::vector<ggml_backend_t> backends;
- #ifdef GGML_USE_METAL
- ggml_backend_t backend_metal = nullptr;
- #endif
- #ifdef GGML_USE_BLAS
- ggml_backend_t backend_blas = nullptr;
- #endif
- ggml_backend_t backend_cpu = nullptr;
- ggml_threadpool_t threadpool = nullptr;
- ggml_threadpool_t threadpool_batch = nullptr;
- bool has_evaluated_once = false;
- mutable int64_t t_start_us;
- mutable int64_t t_load_us;
- mutable int64_t t_p_eval_us = 0;
- mutable int64_t t_eval_us = 0;
- mutable int64_t t_compute_start_us = 0;
- mutable int64_t n_queued_tokens = 0;
- mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
- mutable int32_t n_eval = 0; // number of eval calls
- // host buffer for the model output (logits and embeddings)
- ggml_backend_buffer_t buf_output = nullptr;
- // decode output (2-dimensional array: [n_outputs][n_vocab])
- size_t logits_size = 0; // capacity (of floats) for logits
- float * logits = nullptr;
- std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers
- size_t output_size = 0; // capacity (of tokens positions) for the output buffers
- int32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch
- bool logits_all = false;
- // embeddings output (2-dimensional array: [n_outputs][n_embd])
- // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
- size_t embd_size = 0; // capacity (of floats) for embeddings
- float * embd = nullptr;
- // sequence embeddings output (map of [n_embd] vectors)
- // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
- std::map<llama_seq_id, std::vector<float>> embd_seq;
- // whether we are computing encoder output or decoder output
- bool is_encoding = false;
- // output of the encoder part of the encoder-decoder models
- std::vector<float> embd_enc;
- std::vector<std::set<llama_seq_id>> seq_ids_enc;
- // memory buffers used to evaluate the model
- std::vector<uint8_t> buf_compute_meta;
- ggml_backend_sched_t sched = nullptr;
- ggml_abort_callback abort_callback = nullptr;
- void * abort_callback_data = nullptr;
- // input tensors
- struct ggml_tensor * inp_tokens; // I32 [n_batch]
- struct ggml_tensor * inp_embd; // F32 [n_embd, n_batch]
- struct ggml_tensor * inp_pos; // I32 [n_batch]
- struct ggml_tensor * inp_out_ids; // I32 [n_outputs]
- struct ggml_tensor * inp_KQ_mask; // F32 [kv_size, n_batch]
- struct ggml_tensor * inp_KQ_mask_swa; // F32 [kv_size, n_batch]
- struct ggml_tensor * inp_K_shift; // I32 [kv_size]
- struct ggml_tensor * inp_mean; // F32 [n_batch, n_batch]
- struct ggml_tensor * inp_cls; // I32 [n_batch]
- struct ggml_tensor * inp_s_copy; // I32 [kv_size]
- struct ggml_tensor * inp_s_mask; // F32 [1, n_kv]
- struct ggml_tensor * inp_s_seq; // I32 [n_kv, n_batch]
- struct ggml_tensor * inp_pos_bucket; // I32 [n_batch|n_kv, n_batch]
- struct ggml_tensor * inp_embd_enc; // F32 [n_embd, n_outputs_enc]
- struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]
- struct ggml_tensor * inp_cross_attn_state; // F32 [4, n_embd, 1061]
- };
- struct llama_lora_weight {
- struct ggml_tensor * a = nullptr;
- struct ggml_tensor * b = nullptr;
- llama_lora_weight() = default;
- llama_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b): a(a), b(b) {}
- };
- struct llama_lora_adapter {
- struct llama_model * base_model;
- // map tensor name to lora_a_b
- std::unordered_map<std::string, struct llama_lora_weight> ab_map;
- std::vector<struct ggml_context *> ctxs;
- std::vector<ggml_backend_buffer_t> bufs;
- float alpha;
- llama_lora_adapter(struct llama_model * base_model): base_model(base_model) {
- base_model->lora_adapters.insert(this);
- }
- llama_lora_weight * get_weight(struct ggml_tensor * w) {
- std::string name(w->name);
- auto pos = ab_map.find(name);
- if (pos != ab_map.end()) {
- return &pos->second;
- }
- return nullptr;
- }
- ~llama_lora_adapter() {
- for (struct ggml_context * ctx : ctxs) {
- ggml_free(ctx);
- }
- for (ggml_backend_buffer_t buf : bufs) {
- ggml_backend_buffer_free(buf);
- }
- auto pos = base_model->lora_adapters.find(this);
- if (pos != base_model->lora_adapters.end()) {
- base_model->lora_adapters.erase(pos);
- }
- }
- };
- static size_t llama_get_device_count(const llama_model & model) {
- size_t count = 1;
- #if defined(GGML_USE_CUDA)
- count = ggml_backend_cuda_get_device_count();
- #elif defined(GGML_USE_SYCL)
- count = ggml_backend_sycl_get_device_count();
- #elif defined(GGML_USE_VULKAN)
- count = ggml_backend_vk_get_device_count();
- #elif defined(GGML_USE_CANN)
- return ggml_backend_cann_get_device_count();
- #endif
- #if defined(GGML_USE_RPC)
- count += model.rpc_servers.size();
- #endif
- return count;
- GGML_UNUSED(model);
- }
- static ggml_backend_buffer_type_t llama_default_buffer_type_offload(const llama_model & model, int gpu) {
- ggml_backend_buffer_type_t buft = nullptr;
- #ifdef GGML_USE_RPC
- int rpc_count = (int)model.rpc_servers.size();
- #else
- int rpc_count = 0;
- #endif
- int local_gpu = gpu - rpc_count;
- #if defined(GGML_USE_RPC)
- if (gpu < rpc_count) {
- const char * endpoint = model.rpc_servers[gpu].c_str();
- return ggml_backend_rpc_buffer_type(endpoint);
- }
- #endif
- #if defined(GGML_USE_METAL)
- buft = ggml_backend_metal_buffer_type();
- #elif defined(GGML_USE_CUDA)
- buft = ggml_backend_cuda_buffer_type(local_gpu);
- #elif defined(GGML_USE_VULKAN)
- buft = ggml_backend_vk_buffer_type(local_gpu);
- #elif defined(GGML_USE_SYCL)
- buft = ggml_backend_sycl_buffer_type(local_gpu);
- #elif defined(GGML_USE_KOMPUTE)
- buft = ggml_backend_kompute_buffer_type(local_gpu);
- if (buft == nullptr) {
- LLAMA_LOG_WARN("%s: cannot use GPU %d, check `vulkaninfo --summary`\n", __func__, local_gpu);
- }
- #elif defined(GGML_USE_CANN)
- buft = ggml_backend_cann_buffer_type(local_gpu);
- #endif
- if (buft == nullptr) {
- buft = llama_default_buffer_type_cpu(true);
- }
- return buft;
- GGML_UNUSED(model);
- GGML_UNUSED(local_gpu);
- }
- static ggml_backend_buffer_type_t llama_default_buffer_type_split(const llama_model & model, int fallback_gpu, const float * tensor_split) {
- ggml_backend_buffer_type_t buft = nullptr;
- #ifdef GGML_USE_CUDA
- if (ggml_backend_cuda_get_device_count() > 1) {
- buft = ggml_backend_cuda_split_buffer_type(tensor_split);
- }
- #endif
- #ifdef GGML_USE_SYCL
- if (ggml_backend_sycl_get_device_count() > 1) {
- buft = ggml_backend_sycl_split_buffer_type(tensor_split);
- }
- #endif
- if (buft == nullptr) {
- buft = llama_default_buffer_type_offload(model, fallback_gpu);
- }
- return buft;
- GGML_UNUSED(tensor_split);
- }
- static size_t llama_get_device_memory(const llama_model & model, int device) {
- #ifdef GGML_USE_RPC
- int rpc_count = (int)model.rpc_servers.size();
- #else
- int rpc_count = 0;
- #endif
- int local_device = device - rpc_count;
- #if defined(GGML_USE_RPC)
- if (device < rpc_count) {
- size_t total;
- size_t free;
- const char * endpoint = model.rpc_servers[device].c_str();
- ggml_backend_rpc_get_device_memory(endpoint, &free, &total);
- return free;
- }
- #endif
- #if defined(GGML_USE_CUDA)
- size_t total;
- size_t free;
- ggml_backend_cuda_get_device_memory(local_device, &free, &total);
- return free;
- #elif defined(GGML_USE_SYCL)
- size_t total;
- size_t free;
- ggml_backend_sycl_get_device_memory(local_device, &free, &total);
- return free;
- #elif defined(GGML_USE_VULKAN)
- size_t total;
- size_t free;
- ggml_backend_vk_get_device_memory(local_device, &free, &total);
- return free;
- #elif defined(GGML_USE_CANN)
- size_t total;
- size_t free;
- ggml_backend_cann_get_device_memory(local_device, &free, &total);
- return free;
- #else
- return 1;
- #endif
- GGML_UNUSED(model);
- GGML_UNUSED(local_device);
- }
- //
- // kv cache helpers
- //
- static bool llama_kv_cache_init(
- struct llama_kv_cache & cache,
- const llama_context * ctx,
- ggml_type type_k,
- ggml_type type_v,
- uint32_t kv_size,
- bool offload) {
- const llama_model & model = ctx->model;
- const llama_cparams & cparams = ctx->cparams;
- const struct llama_hparams & hparams = model.hparams;
- const int64_t n_layer = hparams.n_layer;
- cache.has_shift = false;
- cache.recurrent = llama_model_is_recurrent(&model);
- cache.v_trans = !cache.recurrent && !cparams.flash_attn;
- cache.head = 0;
- cache.size = kv_size;
- cache.used = 0;
- cache.type_k = type_k;
- cache.type_v = type_v;
- cache.cells.clear();
- cache.cells.resize(kv_size);
- // count used buffer types
- std::map<ggml_backend_buffer_type_t, int> buft_layer_count;
- if (offload) {
- for (int64_t i = 0; i < n_layer; ++i) {
- buft_layer_count[model.buft_layer[i].buft]++;
- }
- } else {
- buft_layer_count[llama_default_buffer_type_cpu(true)] = n_layer;
- }
- // create a context for each buffer type
- std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
- for (auto & it : buft_layer_count) {
- int n_layers = it.second;
- struct ggml_init_params params = {
- /*.mem_size =*/ 2u*n_layers*ggml_tensor_overhead(),
- /*.mem_buffer =*/ NULL,
- /*.no_alloc =*/ true,
- };
- ggml_context * ctx = ggml_init(params);
- if (!ctx) {
- LLAMA_LOG_ERROR("%s: failed to allocate context for kv cache\n", __func__);
- return false;
- }
- ctx_map[it.first] = ctx;
- cache.ctxs.push_back(ctx);
- }
- cache.k_l.reserve(n_layer);
- cache.v_l.reserve(n_layer);
- for (int i = 0; i < (int) n_layer; i++) {
- // for cross attention layers
- if (model.arch == LLM_ARCH_MLLAMA && hparams.cross_attention_layers(i)) {
- struct ggml_context * ctx = offload ? ctx_map.at(model.buft_layer[i].buft) : cache.ctxs.front();
- ggml_tensor * k = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, hparams.n_embd_head_k, 6404, hparams.n_head_kv(i));
- ggml_tensor * v = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, hparams.n_embd_head_v, 6404, hparams.n_head_kv(i));
- ggml_format_name(k, "cache_k_l%d", i);
- ggml_format_name(v, "cache_v_l%d", i);
- cache.k_l.push_back(k);
- cache.v_l.push_back(v);
- continue;
- }
- const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();
- struct ggml_context * ctx = offload ? ctx_map.at(model.buft_layer[i].buft) : cache.ctxs.front();
- ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size);
- ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size);
- ggml_format_name(k, "cache_k_l%d", i);
- ggml_format_name(v, "cache_v_l%d", i);
- cache.k_l.push_back(k);
- cache.v_l.push_back(v);
- }
- // allocate tensors and initialize the buffers to avoid NaNs in the padding
- for (auto it : ctx_map) {
- ggml_backend_buffer_type_t buft = it.first;
- ggml_context * ctx = it.second;
- ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
- if (!buf) {
- LLAMA_LOG_ERROR("%s: failed to allocate buffer for kv cache\n", __func__);
- return false;
- }
- ggml_backend_buffer_clear(buf, 0);
- LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
- cache.bufs.push_back(buf);
- }
- return true;
- }
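A rough way to sanity-check what llama_kv_cache_init allocates: every layer gets one K and one V tensor of n_embd_{k,v}_gqa * kv_size elements each. The numbers below (32 layers, 1024-wide GQA rows, 4096 cells, F16) are illustrative, not taken from any particular model.

#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t n_layer      = 32;
    const uint64_t n_embd_k_gqa = 1024;   // key row per token
    const uint64_t n_embd_v_gqa = 1024;   // value row per token
    const uint64_t kv_size      = 4096;   // number of cells
    const uint64_t bytes_per_el = 2;      // GGML_TYPE_F16

    const uint64_t bytes = n_layer * (n_embd_k_gqa + n_embd_v_gqa) * kv_size * bytes_per_el;
    std::printf("KV cache ~= %.2f MiB\n", bytes / (1024.0 * 1024.0));   // prints 512.00 MiB
    return 0;
}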
- // find an empty slot of size "n_tokens" in the cache
- // updates the cache head
- // Note: On success, it's important that cache.head points
- // to the first cell of the slot.
- static bool llama_kv_cache_find_slot(
- struct llama_kv_cache & cache,
- const struct llama_ubatch & batch) {
- const uint32_t n_tokens = batch.n_tokens;
- const uint32_t n_seqs = batch.n_seqs;
- const uint32_t n_seq_tokens = batch.n_seq_tokens;
- if (cache.recurrent) {
- // For recurrent state architectures (like Mamba or RWKV),
- // each cache cell can store the state for a whole sequence.
- // A slot should always be contiguous.
- // can only process batches with an equal number of new tokens in each sequence
- GGML_ASSERT(batch.equal_seqs);
- int32_t min = cache.size - 1;
- int32_t max = 0;
- // everything should fit if all seq_ids are smaller than the max
- for (uint32_t s = 0; s < n_seqs; ++s) {
- const uint32_t n_seq_id = batch.n_seq_id[s];
- for (uint32_t j = 0; j < n_seq_id; ++j) {
- const llama_seq_id seq_id = batch.seq_id[s][j];
- if (seq_id < 0 || (uint32_t) seq_id >= cache.size) {
- // too big seq_id
- // TODO: would it be possible to resize the cache instead?
- LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%d Try using a bigger --parallel value\n", __func__, seq_id, cache.size);
- return false;
- }
- if (j > 0) {
- llama_kv_cell & seq = cache.cells[seq_id];
- if (seq.tail >= 0) {
- llama_kv_cell & cell = cache.cells[seq.tail];
- // clear cells from seq_ids that become shared
- // (should not normally happen, but let's handle it anyway)
- cell.seq_id.erase(seq_id);
- seq.tail = -1;
- if (cell.seq_id.empty()) {
- cell.pos = -1;
- cell.src = -1;
- cache.used -= 1;
- }
- }
- }
- }
- }
- #ifndef NDEBUG
- {
- std::vector<int32_t> tails_verif;
- tails_verif.assign(cache.size, -1);
- for (uint32_t i = 0; i < cache.size; ++i) {
- llama_kv_cell & cell = cache.cells[i];
- for (llama_seq_id seq_id : cell.seq_id) {
- if (tails_verif[seq_id] != -1) {
- LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]);
- }
- tails_verif[seq_id] = i;
- }
- }
- for (uint32_t i = 0; i < cache.size; ++i) {
- if (tails_verif[i] != cache.cells[i].tail) {
- LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cache.cells[i].tail, tails_verif[i]);
- }
- }
- }
- #endif
- // find next empty cell
- uint32_t next_empty_cell = cache.head;
- for (uint32_t i = 0; i < cache.size; ++i) {
- if (next_empty_cell >= cache.size) { next_empty_cell -= cache.size; }
- llama_kv_cell & cell = cache.cells[next_empty_cell];
- if (cell.is_empty()) { break; }
- next_empty_cell += 1;
- }
- // find usable cell range
- for (uint32_t s = 0; s < n_seqs; ++s) {
- const llama_seq_id seq_id = batch.seq_id[s][0];
- llama_kv_cell & seq_meta = cache.cells[seq_id];
- bool has_cell = false;
- if (seq_meta.tail >= 0) {
- llama_kv_cell & cell = cache.cells[seq_meta.tail];
- GGML_ASSERT(cell.has_seq_id(seq_id));
- // does this seq_id "own" the cell?
- if (cell.seq_id.size() == 1) { has_cell = true; }
- }
- if (!has_cell) {
- llama_kv_cell & empty_cell = cache.cells[next_empty_cell];
- GGML_ASSERT(empty_cell.is_empty());
- // copy old tail into the empty cell
- if (seq_meta.tail >= 0) {
- llama_kv_cell & orig_cell = cache.cells[seq_meta.tail];
- empty_cell.pos = orig_cell.pos;
- empty_cell.src = orig_cell.src;
- orig_cell.seq_id.erase(seq_id);
- empty_cell.seq_id.insert(seq_id); // will be overwritten
- }
- seq_meta.tail = next_empty_cell;
- // find next empty cell
- if (s + 1 < n_seqs) {
- next_empty_cell += 1;
- for (uint32_t i = 0; i < cache.size; ++i) {
- if (next_empty_cell >= cache.size) { next_empty_cell -= cache.size; }
- llama_kv_cell & cell = cache.cells[next_empty_cell];
- if (cell.is_empty()) { break; }
- next_empty_cell += 1;
- }
- }
- }
- if (min > seq_meta.tail) { min = seq_meta.tail; }
- if (max < seq_meta.tail) { max = seq_meta.tail; }
- }
- // gather and re-order
- for (uint32_t s = 0; s < n_seqs; ++s) {
- int32_t dst_id = s + min;
- int32_t src_id = cache.cells[batch.seq_id[s][0]].tail;
- if (dst_id != src_id) {
- llama_kv_cell & dst_cell = cache.cells[dst_id];
- llama_kv_cell & src_cell = cache.cells[src_id];
- std::swap(dst_cell.pos, src_cell.pos);
- std::swap(dst_cell.src, src_cell.src);
- std::swap(dst_cell.seq_id, src_cell.seq_id);
- // swap tails (assuming they NEVER overlap)
- for (const llama_seq_id seq_id : src_cell.seq_id) {
- cache.cells[seq_id].tail = src_id;
- }
- for (const llama_seq_id seq_id : dst_cell.seq_id) {
- cache.cells[seq_id].tail = dst_id;
- }
- }
- }
- // update the pos of the used seqs
- for (uint32_t s = 0; s < n_seqs; ++s) {
- const llama_pos last_pos = batch.pos[n_seq_tokens * s + n_seq_tokens - 1];
- int32_t cell_id = s + min;
- llama_kv_cell & cell = cache.cells[cell_id];
- if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) {
- // What should happen when the pos backtracks or skips a value?
- // Clearing the state mid-batch would require special-casing which isn't done.
- LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
- __func__, last_pos, cell.pos, batch.seq_id[s][0], n_seq_tokens);
- }
- cell.pos = last_pos;
- cell.seq_id.clear();
- for (int32_t j = 0; j < batch.n_seq_id[s]; ++j) {
- const llama_seq_id seq_id = batch.seq_id[s][j];
- cell.seq_id.insert(seq_id);
- cache.cells[seq_id].tail = cell_id;
- }
- }
- // allow getting the range of used cells, from head to head + n
- cache.head = min;
- cache.n = max - min + 1;
- // sanity check
- return cache.n >= n_seqs;
- }
- // otherwise, one cell per token.
- if (n_tokens > cache.size) {
- LLAMA_LOG_ERROR("%s: n_tokens=%d > cache.size=%d\n", __func__, n_tokens, cache.size);
- return false;
- }
- uint32_t n_tested = 0;
- while (true) {
- if (cache.head + n_tokens > cache.size) {
- n_tested += cache.size - cache.head;
- cache.head = 0;
- continue;
- }
- bool found = true;
- for (uint32_t i = 0; i < n_tokens; i++) {
- if (cache.cells[cache.head + i].pos >= 0) {
- found = false;
- cache.head += i + 1;
- n_tested += i + 1;
- break;
- }
- }
- if (found) {
- break;
- }
- if (n_tested >= cache.size) {
- //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
- return false;
- }
- }
- for (uint32_t s = 0; s < n_seqs; s++) {
- for (uint32_t i = 0; i < n_seq_tokens; ++i) {
- uint32_t k = s*n_seq_tokens + i;
- cache.cells[cache.head + k].pos = batch.pos[k];
- for (int32_t j = 0; j < batch.n_seq_id[s]; j++) {
- cache.cells[cache.head + k].seq_id.insert(batch.seq_id[s][j]);
- }
- }
- }
- cache.used += n_tokens;
- return true;
- }
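- // Note on the contract of the slot-search code above: on success it returns true and
- // cache.head points at the first cell assigned to the batch (the recurrent path also
- // sets cache.n to the size of the contiguous range it reordered); on failure it returns
- // false and the caller should not assume the batch was placed.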
- // find how many cells are currently in use
- static uint32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) {
- for (uint32_t i = cache.size; i > 0; --i) {
- const llama_kv_cell & cell = cache.cells[i - 1];
- if (cell.pos >= 0 && !cell.is_empty()) {
- return i;
- }
- }
- return 0;
- }
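- // Example: if only cells 0 and 7 currently hold data, the scan above returns 8; it walks
- // from the back of the cache and stops at the first non-empty cell, so the result is an
- // upper bound on the used region rather than a count of non-empty cells.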
- static void llama_kv_cache_clear(struct llama_kv_cache & cache) {
- for (int32_t i = 0; i < (int32_t) cache.size; ++i) {
- cache.cells[i].pos = -1;
- cache.cells[i].seq_id.clear();
- cache.cells[i].src = -1;
- cache.cells[i].tail = -1;
- }
- cache.head = 0;
- cache.used = 0;
- for (auto & buf : cache.bufs) {
- ggml_backend_buffer_clear(buf, 0);
- }
- }
- static bool llama_kv_cache_seq_rm(
- struct llama_kv_cache & cache,
- llama_seq_id seq_id,
- llama_pos p0,
- llama_pos p1) {
- uint32_t new_head = cache.size;
- if (p0 < 0) p0 = 0;
- if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
- // models like Mamba or RWKV can't have a state partially erased
- if (cache.recurrent) {
- if (seq_id >= (int64_t) cache.size) {
- // could be fatal
- return false;
- }
- if (0 <= seq_id) {
- int32_t & tail_id = cache.cells[seq_id].tail;
- if (tail_id >= 0) {
- const llama_kv_cell & cell = cache.cells[tail_id];
- // partial intersection is invalid
- if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) {
- return false;
- }
- // invalidate tails which will be cleared
- if (p0 <= cell.pos && cell.pos < p1) {
- tail_id = -1;
- }
- }
- } else {
- // seq_id is negative: the range should include everything or nothing
- if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits<llama_pos>::max())) {
- return false;
- }
- }
- }
- for (uint32_t i = 0; i < cache.size; ++i) {
- if (cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
- if (seq_id < 0) {
- cache.cells[i].seq_id.clear();
- } else if (cache.cells[i].has_seq_id(seq_id)) {
- cache.cells[i].seq_id.erase(seq_id);
- } else {
- continue;
- }
- if (cache.cells[i].is_empty()) {
- // keep count of the number of used cells
- if (cache.cells[i].pos >= 0) cache.used--;
- cache.cells[i].pos = -1;
- cache.cells[i].src = -1;
- if (new_head == cache.size) new_head = i;
- }
- }
- }
- // If we freed up a slot, set head to it so searching can start there.
- if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
- return true;
- }
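- // Usage sketch (hypothetical caller): dropping the whole history of sequence 1 would be
- //   llama_kv_cache_seq_rm(cache, 1, -1, -1);
- // since negative p0/p1 expand to [0, inf) above; passing seq_id < 0 instead clears the
- // range for every sequence.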
- static void llama_kv_cache_seq_cp(
- struct llama_kv_cache & cache,
- llama_seq_id seq_id_src,
- llama_seq_id seq_id_dst,
- llama_pos p0,
- llama_pos p1) {
- if (p0 < 0) p0 = 0;
- if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
- if (cache.recurrent) {
- if ((uint32_t) seq_id_dst < cache.size && (uint32_t) seq_id_src < cache.size) {
- llama_kv_cell & tail_src = cache.cells[seq_id_src];
- llama_kv_cell & tail_dst = cache.cells[seq_id_dst];
- if (tail_dst.tail >= 0) {
- // clear destination seq_id if it wasn't empty
- llama_kv_cell & cell_dst = cache.cells[tail_dst.tail];
- cell_dst.seq_id.erase(seq_id_dst);
- tail_dst.tail = -1;
- if (cell_dst.seq_id.empty()) {
- cell_dst.pos = -1;
- cell_dst.delta = -1;
- cell_dst.src = -1;
- cache.used -= 1;
- }
- }
- if (tail_src.tail >= 0) {
- llama_kv_cell & cell_src = cache.cells[tail_src.tail];
- cell_src.seq_id.insert(seq_id_dst);
- tail_dst.tail = tail_src.tail;
- }
- }
- return;
- }
- // otherwise, this is the KV cache of a Transformer-like model
- cache.head = 0;
- for (uint32_t i = 0; i < cache.size; ++i) {
- if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
- cache.cells[i].seq_id.insert(seq_id_dst);
- }
- }
- }
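- // Usage sketch (hypothetical caller): forking sequence 0 into sequence 1 over its full
- // range would be
- //   llama_kv_cache_seq_cp(cache, 0, 1, -1, -1);
- // for recurrent caches only the tail cell is shared (no state data is copied here), while
- // on the Transformer path the source cells simply gain seq_id_dst as an extra owner.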
- static void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) {
- uint32_t new_head = cache.size;
- for (uint32_t i = 0; i < cache.size; ++i) {
- if (cache.recurrent && (llama_seq_id) i != seq_id) {
- cache.cells[i].tail = -1;
- }
- if (!cache.cells[i].has_seq_id(seq_id)) {
- if (cache.cells[i].pos >= 0) cache.used--;
- cache.cells[i].pos = -1;
- cache.cells[i].src = -1;
- cache.cells[i].seq_id.clear();
- if (new_head == cache.size) new_head = i;
- } else {
- cache.cells[i].seq_id.clear();
- cache.cells[i].seq_id.insert(seq_id);
- }
- }
- // If we freed up a slot, set head to it so searching can start there.
- if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
- }
- static void llama_kv_cache_seq_add(
- struct llama_kv_cache & cache,
- llama_seq_id seq_id,
- llama_pos p0,
- llama_pos p1,
- llama_pos delta) {
- uint32_t new_head = cache.size;
- if (p0 < 0) p0 = 0;
- if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
- // If there is no range then return early to avoid looping over the cache.
- if (p0 == p1) return;
- if (cache.recurrent) {
- // for Mamba-like or RWKV models, only the pos needs to be shifted
- if (0 <= seq_id && seq_id < (int64_t) cache.size) {
- const int32_t tail_id = cache.cells[seq_id].tail;
- if (tail_id >= 0) {
- llama_kv_cell & cell = cache.cells[tail_id];
- if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
- cell.pos += delta;
- }
- }
- }
- return;
- }
- for (uint32_t i = 0; i < cache.size; ++i) {
- if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
- cache.has_shift = true;
- cache.cells[i].pos += delta;
- cache.cells[i].delta += delta;
- if (cache.cells[i].pos < 0) {
- if (!cache.cells[i].is_empty()) {
- cache.used--;
- }
- cache.cells[i].pos = -1;
- cache.cells[i].seq_id.clear();
- if (new_head == cache.size) {
- new_head = i;
- }
- }
- }
- }
- // If we freed up a slot, set head to it so searching can start there.
- // Otherwise we just start the next search from the beginning.
- cache.head = new_head != cache.size ? new_head : 0;
- }
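- // Usage sketch (assumed context-shift pattern, not taken from this file): after discarding
- // n_discard tokens that follow a kept prefix of n_keep tokens, the remaining positions of
- // sequence 0 could be shifted back with
- //   llama_kv_cache_seq_add(cache, 0, n_keep + n_discard, -1, -n_discard);
- // cells whose position drops below 0 are freed by the loop above, and has_shift records
- // that the accumulated deltas still need to be applied.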
- static void llama_kv_cache_seq_div(
- struct llama_kv_cache & cache,
- llama_seq_id seq_id,
- llama_pos p0,
- llama_pos p1,
- int d) {
- if (p0 < 0) p0 = 0;
- if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
- // If there is no range then return early to avoid looping over the cache.
- if (p0 == p1) return;
- if (cache.recurrent) {
- // for Mamba-like or RWKV models, only the pos needs to be changed
- if (0 <= seq_id && seq_id < (int64_t) cache.size) {
- const int32_t tail_id = cache.cells[seq_id].tail;
- if (tail_id >= 0) {
- llama_kv_cell & cell = cache.cells[tail_id];
- if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
- cell.pos /= d;
- }
- }
- }
- return;
- }
- for (uint32_t i = 0; i < cache.size; ++i) {
- if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
- cache.has_shift = true;
- {
- llama_pos p_old = cache.cells[i].pos;
- cache.cells[i].pos /= d;
- cache.cells[i].delta += cache.cells[i].pos - p_old;
- }
- }
- }
- }
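- // Note: this is the scaling counterpart of the add above; positions in [p0, p1) are divided
- // by d and the difference is accumulated into each cell's delta, so the same deferred shift
- // mechanism flagged by has_shift can pick the change up later.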
- static llama_pos llama_kv_cache_seq_pos_max(struct llama_kv_cache & cache, llama_seq_id seq_id) {
- llama_pos result = 0;
- for (uint32_t i = 0; i < cache.size; ++i) {
- if (cache.cells[i].has_seq_id(seq_id)) {
- result = std::max(result, cache.cells[i].pos);
- }
- }
- return result;
- }
- static void llama_kv_cache_defrag(struct llama_kv_cache & cache) {
- if (!cache.recurrent) {
- cache.do_defrag = true;
- }
- }
- static uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams) {
- // the FA kernels require padding to avoid extra runtime boundary checks
- return cparams.flash_attn ? 256u : 32u;
- }
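- // Example: this helper only reports the required granularity; the rounding itself is done
- // by the caller. With flash_attn a cache of 1000 cells would be padded up to 1024 (next
- // multiple of 256); without it, up to the next multiple of 32, which is also 1024 here.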
- //
- // model loading and saving
- //
- enum llama_fver {
- GGUF_FILE_VERSION_V1 = 1,
- GGUF_FILE_VERSION_V2 = 2,
- GGUF_FILE_VERSION_V3 = 3,
- };
- static const char * llama_file_version_name(llama_fver version) {
- switch (version) {
- case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)";
- case GGUF_FILE_VERSION_V2: return "GGUF V2";
- case GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)";
- }
- return "unknown";
- }
- static std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
- char buf[256];
- snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0));
- for (size_t i = 1; i < ne.size(); i++) {
- snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i));
- }
- return buf;
- }
- static std::string llama_format_tensor_shape(const struct ggml_tensor * t) {
- char buf[256];
- snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]);
- for (int i = 1; i < GGML_MAX_DIMS; i++) {
- snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]);
- }
- return buf;
- }
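- // Example: for a tensor with ne = {4096, 11008, 1, 1} the ggml_tensor overload prints
- // " 4096, 11008,     1,     1" (always GGML_MAX_DIMS fields, each right-aligned to width 5),
- // while the std::vector overload prints only as many fields as the vector holds.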
- namespace GGUFMeta {
- template <typename T, gguf_type gt_, T (*gfun)(const gguf_context *, const int)>
- struct GKV_Base_Type {
- static constexpr gguf_type gt = gt_;
- static T getter(const gguf_context * ctx, const int kid) {
- return gfun(ctx, kid);
- }
- };
- template<typename T> struct GKV_Base;
- template<> struct GKV_Base<bool >: GKV_Base_Type<bool, GGUF_TYPE_BOOL, gguf_get_val_bool> {};
- template<> struct GKV_Base<uint8_t >: GKV_Base_Type<uint8_t, GGUF_TYPE_UINT8, gguf_get_val_u8 > {};
- template<> struct GKV_Base<uint16_t >: GKV_Base_Type<uint16_t, GGUF_TYPE_UINT16, gguf_get_val_u16 > {};
- template<> struct GKV_Base<uint32_t >: GKV_Base_Type<uint32_t, GGUF_TYPE_UINT32, gguf_get_val_u32 > {};
- template<> struct GKV_Base<uint64_t >: GKV_Base_Type<uint64_t, GGUF_TYPE_UINT64, gguf_get_val_u64 > {};
- template<> struct GKV_Base<int8_t >: GKV_Base_Type<int8_t, GGUF_TYPE_INT8, gguf_get_val_i8 > {};
- template<> struct GKV_Base<int16_t >: GKV_Base_Type<int16_t, GGUF_TYPE_INT16, gguf_get_val_i16 > {};
- template<> struct GKV_Base<int32_t >: GKV_Base_Type<int32_t, GGUF_TYPE_INT32, gguf_get_val_i32 > {};
- template<> struct GKV_Base<int64_t >: GKV_Base_Type<int64_t, GGUF_TYPE_INT64, gguf_get_val_i64 > {};
- template<> struct GKV_Base<float >: GKV_Base_Type<float, GGUF_TYPE_FLOAT32, gguf_get_val_f32 > {};
- template<> struct GKV_Base<double >: GKV_Base_Type<double, GGUF_TYPE_FLOAT64, gguf_get_val_f64 > {};
- template<> struct GKV_Base<const char *>: GKV_Base_Type<const char *, GGUF_TYPE_STRING, gguf_get_val_str > {};
- template<> struct GKV_Base<std::string> {
- static constexpr gguf_type gt = GGUF_TYPE_STRING;
- static std::string getter(const gguf_context * ctx, const int kid) {
- return gguf_get_val_str(ctx, kid);
- }
- };
- struct ArrayInfo {
- const gguf_type gt;
- const size_t length;
- const void * data;
- };
- template<> struct GKV_Base<ArrayInfo> {
- public:
- static constexpr gguf_type gt = GGUF_TYPE_ARRAY;
- static ArrayInfo getter(const gguf_context *ctx, const int k) {
- return ArrayInfo {
- gguf_get_arr_type(ctx, k),
- size_t(gguf_get_arr_n(ctx, k)),
- gguf_get_arr_data(ctx, k),
- };
- }
- };
- template<typename T>
- class GKV : public GKV_Base<T> {
- GKV() = delete;
- public:
- static T get_kv(const gguf_context * ctx, const int k) {
- const enum gguf_type kt = gguf_get_kv_type(ctx, k);
- if (kt != GKV::gt) {
- throw std::runtime_error(format("key %s has wrong type %s but expected type %s",
- gguf_get_key(ctx, k), gguf_type_name(kt), gguf_type_name(GKV::gt)));
- }
- return GKV::getter(ctx, k);
- }
- static const char * override_type_to_str(const llama_model_kv_override_type ty) {
- switch (ty) {
- case LLAMA_KV_OVERRIDE_TYPE_BOOL: return "bool";
- case LLAMA_KV_OVERRIDE_TYPE_INT: return "int";
- case LLAMA_KV_OVERRIDE_TYPE_FLOAT: return "float";
- case LLAMA_KV_OVERRIDE_TYPE_STR: return "str";
- }
- return "unknown";
- }
- static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override * ovrd) {
- if (!ovrd) { return false; }
- if (ovrd->tag == expected_type) {
- LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ",
- __func__, override_type_to_str(ovrd->tag), ovrd->key);
- switch (ovrd->tag) {
- case LLAMA_KV_OVERRIDE_TYPE_BOOL: {
- LLAMA_LOG_INFO("%s\n", ovrd->val_bool ? "true" : "false");
- } break;
- case LLAMA_KV_OVERRIDE_TYPE_INT: {
- LLAMA_LOG_INFO("%" PRId64 "\n", ovrd->val_i64);
- } break;
- case LLAMA_KV_OVERRIDE_TYPE_FLOAT: {
- LLAMA_LOG_INFO("%.6f\n", ovrd->val_f64);
- } break;
- case LLAMA_KV_OVERRIDE_TYPE_STR: {
- LLAMA_LOG_INFO("%s\n", ovrd->val_str);
- } break;
- default:
- // Shouldn't be possible to end up here, but just in case...
- throw std::runtime_error(
- format("Unsupported attempt to override %s type for metadata key %s\n",
- override_type_to_str(ovrd->tag), ovrd->key));
- }
- return true;
- }
- LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n",
- __func__, ovrd->key, override_type_to_str(expected_type), override_type_to_str(ovrd->tag));
- return false;
- }
- template<typename OT>
- static typename std::enable_if<std::is_same<OT, bool>::value, bool>::type
- try_override(OT & target, const struct llama_model_kv_override * ovrd) {
- if (validate_override(LLAMA_KV_OVERRIDE_TYPE_BOOL, ovrd)) {
- target = ovrd->val_bool;
- return true;
- }
- return false;
- }
- template<typename OT>
- static typename std::enable_if<!std::is_same<OT, bool>::value && std::is_integral<OT>::value, bool>::type
- try_override(OT & target, const struct llama_model_kv_override * ovrd) {
- if (validate_override(LLAMA_KV_OVERRIDE_TYPE_INT, ovrd)) {
- target = ovrd->val_i64;
- return true;
- }
- return false;
- }
- template<typename OT>
- static typename std::enable_if<std::is_floating_point<OT>::value, bool>::type
- try_override(OT & target, const struct llama_model_kv_override * ovrd) {
- if (validate_override(LLAMA_KV_OVERRIDE_TYPE_FLOAT, ovrd)) {
- target = ovrd->val_f64;
- return true;
- }
- return false;
- }
- template<typename OT>
- static typename std::enable_if<std::is_same<OT, std::string>::value, bool>::type
- try_override(OT & target, const struct llama_model_kv_override * ovrd) {
- if (validate_override(LLAMA_KV_OVERRIDE_TYPE_STR, ovrd)) {
- target = ovrd->val_str;
- return true;
- }
- return false;
- }
- static bool set(const gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
- if (try_override<T>(target, ovrd)) {
- return true;
- }
- if (k < 0) { return false; }
- target = get_kv(ctx, k);
- return true;
- }
- static bool set(const gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
- return set(ctx, gguf_find_key(ctx, key), target, ovrd);
- }
- static bool set(const gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
- return set(ctx, key.c_str(), target, ovrd);
- }
- };
- }
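- // Usage sketch (assumed call pattern): reading a string key while honoring an optional
- // command-line override with the helpers defined in this namespace:
- //   std::string name;
- //   GGUFMeta::GKV<std::string>::set(meta, "general.name", name, ovrd); // ovrd may be nullptr
- // a mismatch between the stored type and T makes get_kv() above throw.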
- using llama_buf_map = std::unordered_map<uint32_t, ggml_backend_buffer_t>;
- static size_t llama_model_max_nodes(const llama_model & model) {
- return std::max<size_t>(8192, model.tensors_by_name.size()*5);
- }
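- // Example: a model with 1000 tensors yields max(8192, 5000) = 8192 graph nodes, while one
- // with 2000 tensors yields 10000; the 5x factor appears to be headroom for the ops created
- // per weight tensor when the compute graph is built.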
- struct llama_model_loader {
- int n_kv = 0;
- int n_tensors = 0;
- int n_created = 0;
- int64_t n_elements = 0;
- size_t n_bytes = 0;
- bool use_mmap = false;
- bool check_tensors;
- llama_files files;
- llama_ftype ftype;
- llama_fver fver;
- llama_mmaps mappings;
- // Holds information on a model weight
- struct llama_tensor_weight {
- uint16_t idx; // source file index
- size_t offs; // tensor data offset in the original file
- ggml_tensor * tensor;
- llama_tensor_weight(const llama_file * file, uint16_t idx, const char * name, const struct gguf_context * gguf_ctx, ggml_tensor * tensor) : idx(idx), tensor(tensor) {
- const int tensor_idx = gguf_find_tensor(gguf_ctx, name);
- offs = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, tensor_idx);
- if (offs + ggml_nbytes(tensor) < offs || offs + ggml_nbytes(tensor) > file->size) {
- throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", name));
- }
- }
- };
- std::vector<llama_tensor_weight> weights;
- std::unordered_map<std::string, struct llama_model_kv_override> kv_overrides;
- struct gguf_context * meta = NULL;
- std::vector<ggml_context *> contexts;
- std::string arch_name;
- LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
- llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p) {
- int trace = 0;
- if (getenv("LLAMA_TRACE")) {
- trace = atoi(getenv("LLAMA_TRACE"));
- }
- if (param_overrides_p != nullptr) {
- for (const struct llama_model_kv_override * p = param_overrides_p; p->key[0] != 0; p++) {
- kv_overrides.insert({std::string(p->key), *p});
- }
- }
- struct ggml_context * ctx = NULL;
- struct gguf_init_params params = {
- /*.no_alloc = */ true,
- /*.ctx = */ &ctx,
- };
- meta = gguf_init_from_file(fname.c_str(), params);
- if (!meta) {
- throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
- }
- get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
- llm_kv = LLM_KV(llm_arch_from_string(arch_name));
- files.emplace_back(new llama_file(fname.c_str(), "rb"));
- contexts.emplace_back(ctx);
- // Save the tensor data offsets of the main file.
- // For subsidiary files, the tensor data offsets in `meta` must not be used,
- // so we build a unified tensor index for the weights.
- for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
- weights.emplace_back(files.back().get(), 0, cur->name, meta, cur);
- }
- uint16_t n_split = 0;
- get_key(llm_kv(LLM_KV_SPLIT_COUNT), n_split, false);
- // Load additional GGML contexts
- if (n_split > 1) {
- uint16_t idx = 0;
- get_key(llm_kv(LLM_KV_SPLIT_NO), idx);
- if (idx != 0) {
- throw std::runtime_error(format("illegal split file: %d, model must be loaded with the first split", idx));
- }
- char split_prefix[PATH_MAX] = {0};
- if (!llama_split_prefix(split_prefix, sizeof(split_prefix), fname.c_str(), idx, n_split)) {
- throw std::runtime_error(format("invalid split file: %s", fname.c_str()));
- }
- if (trace > 0) {
- LLAMA_LOG_INFO("%s: loading additional %d GGUFs\n", __func__, n_split);
- }
- char split_path[PATH_MAX] = {0};
- for (idx = 1; idx < n_split; idx++) {
- llama_split_path(split_path, sizeof(split_path), split_prefix, idx, n_split);
- struct gguf_init_params split_params = {
- /*.no_alloc = */ true,
- /*.ctx = */ &ctx,
- };
- struct gguf_context * ctx_gguf = gguf_init_from_file(split_path, split_params);
- if (!ctx_gguf) {
- throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, split_path));
- }
- files.emplace_back(new llama_file(split_path, "rb"));
- contexts.emplace_back(ctx);
- // Save the tensor data offset info of this shard.
- for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
- weights.emplace_back(files.back().get(), idx, cur->name, ctx_gguf, cur);
- }
- gguf_free(ctx_gguf);
- }
- get_key(llm_kv(LLM_KV_SPLIT_TENSORS_COUNT), n_tensors);
- // sanity check
- {
- const int n_tensors_loaded = (int) weights.size();
- if (n_tensors != n_tensors_loaded) {
- throw std::runtime_error(format("corrupted model: %d tensors expected but %d found", n_tensors, n_tensors_loaded));
- }
- }
- LLAMA_LOG_INFO("%s: additional %d GGUFs metadata loaded.\n", __func__, n_split - 1);
- }
- n_kv = gguf_get_n_kv(meta);
- n_tensors = weights.size();
- fver = (enum llama_fver) gguf_get_version(meta);
- std::set<std::string> tensor_names;
- for (auto & w : weights) {
- n_elements += ggml_nelements(w.tensor);
- n_bytes += ggml_nbytes(w.tensor);
- // make sure there are no duplicated tensor names
- const std::string name(w.tensor->name);
- auto found = tensor_names.find(name);
- if (found != tensor_names.end()) {
- throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", w.tensor->name));
- }
- tensor_names.insert(name);
- }
- LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
- __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver));
- // determine file type based on the number of tensors for each quantization and print meta data
- // TODO: make optional
- {
- std::map<enum ggml_type, uint32_t> n_type;
- uint32_t n_type_max = 0;
- enum ggml_type type_max = GGML_TYPE_F32;
- for (int i = 0; i < n_tensors; i++) {
- const ggml_tensor * tensor = weights.at(i).tensor;
- enum ggml_type type = tensor->type;
- n_type[type]++;
- if (n_type_max < n_type[type]) {
- n_type_max = n_type[type];
- type_max = type;
- }
- if (trace > 0) {
- const uint16_t sid = weights.at(i).idx;
- LLAMA_LOG_INFO("%s: - tensor %4d, split %2d: %32s %-8s [ %s ]\n", __func__, i, sid, ggml_get_name(tensor), ggml_type_name(type), llama_format_tensor_shape(tensor).c_str());
- }
- }
- switch (type_max) {
- case GGML_TYPE_F32: ftype = LLAMA_FTYPE_ALL_F32; break;
- case GGML_TYPE_F16: ftype = LLAMA_FTYPE_MOSTLY_F16; break;
- case GGML_TYPE_BF16: ftype = LLAMA_FTYPE_MOSTLY_BF16; break;
- case GGML_TYPE_Q4_0: ftype = LLAMA_FTYPE_MOSTLY_Q4_0; break;
- case GGML_TYPE_Q4_1: ftype = LLAMA_FTYPE_MOSTLY_Q4_1; break;
- case GGML_TYPE_Q5_0: ftype = LLAMA_FTYPE_MOSTLY_Q5_0; break;
- case GGML_TYPE_Q5_1: ftype = LLAMA_FTYPE_MOSTLY_Q5_1; break;
- case GGML_TYPE_Q8_0: ftype = LLAMA_FTYPE_MOSTLY_Q8_0; break;
- case GGML_TYPE_Q2_K: ftype = LLAMA_FTYPE_MOSTLY_Q2_K; break;
- case GGML_TYPE_Q3_K: ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M; break;
- case GGML_TYPE_Q4_K: ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M; break;
- case GGML_TYPE_Q5_K: ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M; break;
- case GGML_TYPE_Q6_K: ftype = LLAMA_FTYPE_MOSTLY_Q6_K; break;
- case GGML_TYPE_TQ1_0: ftype = LLAMA_FTYPE_MOSTLY_TQ1_0; break;
- case GGML_TYPE_TQ2_0: ftype = LLAMA_FTYPE_MOSTLY_TQ2_0; break;
- case GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break;
- case GGML_TYPE_IQ2_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XS; break;
- case GGML_TYPE_IQ2_S: ftype = LLAMA_FTYPE_MOSTLY_IQ2_S; break;
- case GGML_TYPE_IQ3_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_XXS; break;
- case GGML_TYPE_IQ1_S: ftype = LLAMA_FTYPE_MOSTLY_IQ1_S; break;
- case GGML_TYPE_IQ1_M: ftype = LLAMA_FTYPE_MOSTLY_IQ1_M; break;
- case GGML_TYPE_IQ4_NL: ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL; break;
- case GGML_TYPE_IQ4_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS; break;
- case GGML_TYPE_IQ3_S: ftype = LLAMA_FTYPE_MOSTLY_IQ3_S; break;
- case GGML_TYPE_Q4_0_4_4: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_4_4; break;
- case GGML_TYPE_Q4_0_4_8: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_4_8; break;
- case GGML_TYPE_Q4_0_8_8: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_8_8; break;
- default:
- {
- LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max));
- ftype = LLAMA_FTYPE_ALL_F32;
- } break;
- }
- // this is a way to mark that we have "guessed" the file type
- ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);
- {
- const int kid = gguf_find_key(meta, "general.file_type"); // TODO: use LLM_KV
- if (kid >= 0) {
- ftype = (llama_ftype) gguf_get_val_u32(meta, kid);
- }
- }
- LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
- for (int i = 0; i < n_kv; i++) {
- const char * name = gguf_get_key(meta, i);
- const enum gguf_type type = gguf_get_kv_type(meta, i);
- const std::string type_name =
- type == GGUF_TYPE_ARRAY
- ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(meta, i)), gguf_get_arr_n(meta, i))
- : gguf_type_name(type);
- std::string value = gguf_kv_to_str(meta, i);
- const size_t MAX_VALUE_LEN = 40;
- if (value.size() > MAX_VALUE_LEN) {
- value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
- }
- replace_all(value, "\n", "\\n");
- LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
- }
- // print type counts
- for (auto & kv : n_type) {
- if (kv.second == 0) {
- continue;
- }
- LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second);
- }
- }
- if (!llama_mmap::SUPPORTED) {
- LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__);
- use_mmap = false;
- }
- this->use_mmap = use_mmap;
- this->check_tensors = check_tensors;
- }
- ~llama_model_loader() {
- if (meta) {
- gguf_free(meta);
- }
- for (auto * ctx : contexts) {
- ggml_free(ctx);
- }
- }
- template<typename T>
- typename std::enable_if<std::is_integral<T>::value, bool>::type
- get_arr_n(const std::string & key, T & result, const bool required = true) {
- const int kid = gguf_find_key(meta, key.c_str());
- if (kid < 0) {
- if (required) {
- throw std::runtime_error(format("key not found in model: %s", key.c_str()));
- }
- return false;
- }
- struct GGUFMeta::ArrayInfo arr_info =
- GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta, kid);
- result = arr_info.length;
- return true;
- }
- template<typename T>
- typename std::enable_if<std::is_integral<T>::value, bool>::type
- get_arr_n(const enum llm_kv kid, T & result, const bool required = true) {
- return get_arr_n(llm_kv(kid), result, required);
- }
- template<typename T>
- bool get_arr(const std::string & key, std::vector<T> & result, const bool required = true) {
- const int kid = gguf_find_key(meta, key.c_str());
- if (kid < 0 || gguf_get_kv_type(meta, kid) != GGUF_TYPE_ARRAY) {
- if (required) {
- throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
- }
- return false;
- }
- struct GGUFMeta::ArrayInfo arr_info =
- GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta, kid);
- switch (arr_info.gt) {
- case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value)); break;
- case GGUF_TYPE_INT32: GGML_ASSERT(
- (std::is_same<T, int32_t>::value) ||
- (std::is_same<T, uint32_t>::value)); break;
- default:
- throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str()));
- }
- result.resize(arr_info.length);
- result.assign((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length);
- return true;
- }
- template<typename T, size_t N_MAX>
- bool get_arr(const std::string & key, std::array<T, N_MAX> & result, const bool required = true) {
- const int kid = gguf_find_key(meta, key.c_str());
- if (kid < 0 || gguf_get_kv_type(meta, kid) != GGUF_TYPE_ARRAY) {
- if (required) {
- throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
- }
- return false;
- }
- struct GGUFMeta::ArrayInfo arr_info =
- GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta, kid);
- switch (arr_info.gt) {
- case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value)); break;
- case GGUF_TYPE_INT32: GGML_ASSERT(
- (std::is_same<T, int32_t>::value) ||
- (std::is_same<T, uint32_t>::value)); break;
- default:
- throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str()));
- }
- if (arr_info.length > N_MAX) {
- throw std::runtime_error(format("array length %u for key %s exceeds max %u", (uint32_t) arr_info.length, key.c_str(), (uint32_t) N_MAX));
- }
- std::copy((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length, result.begin());
- return true;
- }
- template<typename T>
- bool get_arr(const enum llm_kv kid, T & result, const bool required = true) {
- return get_arr(llm_kv(kid), result, required);
- }
- template<typename T>
- bool get_key(const std::string & key, T & result, const bool required = true) {
- auto it = kv_overrides.find(key);
- const struct llama_model_kv_override * override =
- it != kv_overrides.end() ? &it->second : nullptr;
- const bool found = GGUFMeta::GKV<T>::set(meta, key, result, override);
- if (required && !found) {
- throw std::runtime_error(format("key not found in model: %s", key.c_str()));
- }
- return found;
- }
- template<typename T>
- bool get_key(const enum llm_kv kid, T & result, const bool required = true) {
- return get_key(llm_kv(kid), result, required);
- }
- // get array of n <= N_MAX elements, or a single element repeated n times
- template<typename T, size_t N_MAX>
- bool get_key_or_arr(const std::string & key, std::array<T, N_MAX> & result, uint32_t n, const bool required = true) {
- const int kid = gguf_find_key(meta, key.c_str());
- if (kid < 0) {
- if (required) {
- throw std::runtime_error(format("key not found in model: %s", key.c_str()));
- }
- return false;
- }
- if (n > N_MAX) {
- throw std::runtime_error(format("n > N_MAX: %u > %u for key %s", (uint32_t) n, (uint32_t) N_MAX, key.c_str()));
- }
- if (gguf_get_kv_type(meta, kid) == GGUF_TYPE_ARRAY) {
- struct GGUFMeta::ArrayInfo arr_info =
- GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta, kid);
- if (n != arr_info.length) {
- throw std::runtime_error(format("key %s has wrong array length; expected %u, got %u", key.c_str(), n, (uint32_t) arr_info.length));
- }
- return get_arr(key, result, required);
- } else {
- T value;
- bool ok = get_key(key, value, required);
- if (!ok) {
- return false;
- }
- for (uint32_t i = 0; i < n; i++) {
- result[i] = value;
- }
- return true;
- }
- }
- template<typename T>
- bool get_key_or_arr(const enum llm_kv kid, T & result, uint32_t n, const bool required = true) {
- return get_key_or_arr(llm_kv(kid), result, n, required);
- }
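- // Usage sketch: this pair is what lets per-layer hyperparameters be stored either as a
- // single scalar or as one value per layer; e.g. the call used further below,
- //   ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer);
- // fills all n_layer entries from a scalar, or copies the array when the lengths match.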
- std::string get_arch_name() const {
- return arch_name;
- }
- enum llm_arch get_arch() const {
- return llm_kv.arch;
- }
- const char * get_tensor_name(int i) const {
- return weights.at(i).tensor->name;
- }
- const llama_tensor_weight * get_weight(const char * name) const {
- for (const auto & weight : weights) {
- if (strcmp(name, weight.tensor->name) == 0) {
- return &weight;
- }
- }
- return nullptr;
- }
- const llama_tensor_weight * get_weight(int i) const {
- return get_weight(get_tensor_name(i));
- }
- const llama_tensor_weight & require_weight(const char * name) const {
- const llama_tensor_weight * weight = get_weight(name);
- if (!weight) {
- throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name));
- }
- return *weight;
- }
- struct ggml_tensor * get_tensor_meta(const char * name) const {
- const auto * weight = get_weight(name);
- if (!weight) {
- return nullptr;
- }
- return weight->tensor;
- }
- struct ggml_tensor * require_tensor_meta(const char * name) const {
- struct ggml_tensor * tensor = get_tensor_meta(name);
- if (!tensor) {
- throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name));
- }
- return tensor;
- }
- struct ggml_tensor * get_tensor_meta(int i) const {
- return get_tensor_meta(get_tensor_name(i));
- }
- struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, const struct ggml_tensor * cur, bool duplicated) {
- struct ggml_tensor * tensor = ggml_dup_tensor(ctx, cur);
- ggml_set_name(tensor, ggml_get_name(cur));
- if (duplicated) {
- size_data += ggml_nbytes(cur);
- } else {
- n_created++;
- }
- return tensor;
- }
- const struct ggml_tensor * check_tensor_dims(const std::string & name, const std::vector<int64_t> & ne, bool required) const {
- const struct ggml_tensor * cur = get_tensor_meta(name.c_str());
- if (cur == NULL) {
- if (!required) {
- return NULL;
- }
- throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
- }
- {
- bool is_ok = true;
- for (size_t i = 0; i < GGML_MAX_DIMS; ++i) {
- if ((i < ne.size() && ne[i] != cur->ne[i]) || (i >= ne.size() && cur->ne[i] != 1)) {
- is_ok = false;
- break;
- }
- }
- if (!is_ok) {
- throw std::runtime_error(
- format("%s: tensor '%s' has wrong shape; expected %s, got %s",
- __func__, name.c_str(),
- llama_format_tensor_shape(ne).c_str(),
- llama_format_tensor_shape(cur).c_str()));
- }
- }
- return cur;
- }
- static const int TENSOR_NOT_REQUIRED = 1;
- static const int TENSOR_DUPLICATED = 2;
- struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, int flags = 0) {
- const struct ggml_tensor * cur = check_tensor_dims(name, ne, !(flags & TENSOR_NOT_REQUIRED));
- if (cur == NULL) {
- return NULL;
- }
- return create_tensor_for(ctx, cur, flags & TENSOR_DUPLICATED);
- }
- struct ggml_tensor * create_tensor_as_view(struct ggml_context * ctx, struct ggml_tensor * base, const std::string & name, const std::vector<int64_t> & ne, size_t offset, bool required = true) {
- const struct ggml_tensor * cur = check_tensor_dims(name, ne, required);
- if (cur == NULL) {
- return NULL;
- }
- if (cur->type != base->type) {
- throw std::runtime_error(format("%s: tensor '%s' has wrong type; expected %s, got %s", __func__, name.c_str(), ggml_type_name(base->type), ggml_type_name(cur->type)));
- }
- std::array<int64_t, GGML_MAX_DIMS> dims;
- for (size_t i = 0; i < GGML_MAX_DIMS; ++i) {
- dims[i] = i < ne.size() ? ne[i] : 1;
- }
- struct ggml_tensor * tensor = ggml_view_4d(ctx, base,
- dims[0], dims[1], dims[2], dims[3],
- cur->nb[1], cur->nb[2], cur->nb[3],
- offset);
- ggml_set_name(tensor, name.c_str());
- n_created++;
- return tensor;
- }
- void done_getting_tensors() const {
- if (n_created != n_tensors) {
- throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created));
- }
- }
- void init_mappings(bool prefetch = true, llama_mlocks * mlock_mmaps = nullptr) {
- if (use_mmap) {
- mappings.reserve(files.size());
- mmaps_used.reserve(files.size());
- for (const auto & file : files) {
- std::unique_ptr<llama_mmap> mapping(new llama_mmap(file.get(), prefetch ? -1 : 0, ggml_is_numa()));
- mmaps_used.emplace_back(mapping->size, 0);
- if (mlock_mmaps) {
- std::unique_ptr<llama_mlock> mlock_mmap(new llama_mlock());
- mlock_mmap->init(mapping->addr);
- mlock_mmaps->emplace_back(std::move(mlock_mmap));
- }
- mappings.emplace_back(std::move(mapping));
- }
- }
- // compute the total size of all tensors for progress reporting
- for (auto & w : weights) {
- size_data += ggml_nbytes(w.tensor);
- }
- }
- void get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, ggml_context * ctx) const {
- GGML_ASSERT(!mappings.empty());
- const auto & mapping = mappings.at(idx);
- *first = mapping->size;
- *last = 0;
- *addr = mapping->addr;
- for (ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor; tensor = ggml_get_next_tensor(ctx, tensor)) {
- try {
- const auto * weight = get_weight(ggml_get_name(tensor));
- if (!weight) {
- continue;
- }
- if (weight->idx != idx) {
- continue;
- }
- *first = std::min(*first, weight->offs);
- *last = std::max(*last, weight->offs + ggml_nbytes(tensor));
- } catch(...) {
- // the tensor is not in the model
- }
- }
- }
- // for backwards compatibility, does not support ggml-backend
- void load_data_for(struct ggml_tensor * cur) const {
- const auto & w = require_weight(ggml_get_name(cur));
- if (use_mmap) {
- const auto & mapping = mappings.at(w.idx);
- if (cur->data == nullptr) {
- cur->data = (uint8_t *)mapping->addr + w.offs;
- } else {
- memcpy(cur->data, (uint8_t *)mapping->addr + w.offs, ggml_nbytes(cur));
- }
- } else {
- GGML_ASSERT(cur->data != nullptr);
- GGML_ASSERT(w.idx < files.size());
- const auto & file = files.at(w.idx);
- file->seek(w.offs, SEEK_SET);
- file->read_raw(cur->data, ggml_nbytes(cur));
- }
- if (check_tensors && !ggml_validate_row_data(cur->type, cur->data, ggml_nbytes(cur))) {
- throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur)));
- }
- }
- size_t size_done = 0;
- size_t size_data = 0;
- std::vector<std::pair<size_t, size_t>> mmaps_used;
- // Returns false if cancelled by progress_callback
- bool load_all_data(
- struct ggml_context * ctx,
- llama_buf_map & bufs_mmap,
- llama_mlocks * lmlocks,
- llama_progress_callback progress_callback,
- void * progress_callback_user_data) {
- GGML_ASSERT(size_data != 0 && "call init_mappings() first");
- std::vector<no_init<uint8_t>> read_buf;
- std::vector<std::future<std::pair<ggml_tensor *, bool>>> validation_result;
- #if defined(GGML_USE_CUDA)
- // Use 4 staging buffers for async uploads; 1 MB each seems to be a good default for single NVMe drives.
- // NVMe RAID configurations might require more / larger buffers.
- constexpr size_t n_buffers = 4;
- constexpr size_t buffer_size = 1 * 1024 * 1024; // 1MB
- std::vector<ggml_backend_buffer_t> host_buffers;
- std::vector<void*> host_ptrs;
- std::vector<ggml_backend_event_t> events;
- size_t buffer_idx = 0; // buffer to use for async loads
- ggml_backend_t cuda_backend = nullptr;
- if (!use_mmap && !check_tensors) {
- // When not using mmapped I/O, use async uploads from pinned memory to GPU memory.
- // First determine if the CUDA backend is active, and if so, determine the device ID.
- ggml_backend_buffer_t buf = bufs_mmap.count(0) ? bufs_mmap.at(0) : nullptr;
- if (buf) {
- ggml_backend_buffer_type_t buffer_type = ggml_backend_buffer_get_type(buf);
- for (int i = 0; i < ggml_backend_cuda_get_device_count(); ++i) {
- auto * cuda_buffer_type = ggml_backend_cuda_buffer_type(i);
- if (buffer_type == cuda_buffer_type) {
- cuda_backend = ggml_backend_cuda_init(i);
- break;
- }
- }
- }
- // If the CUDA backend is active, create pinned memory buffers and events for synchronization.
- if (cuda_backend) {
- for (size_t idx = 0; idx < n_buffers; ++idx) {
- host_buffers.emplace_back(ggml_backend_buft_alloc_buffer(llama_default_buffer_type_cpu(true), buffer_size));
- host_ptrs.emplace_back(ggml_backend_buffer_get_base(host_buffers[idx]));
- events.emplace_back(ggml_backend_event_new(cuda_backend));
- }
- }
- }
- #endif
- for (struct ggml_tensor * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) {
- const auto * weight = get_weight(ggml_get_name(cur));
- if (weight == nullptr) {
- // this can happen with split experts models
- continue;
- }
- if (progress_callback) {
- if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) {
- return false;
- }
- }
- size_t n_size = ggml_nbytes(cur);
- if (use_mmap) {
- const auto & mapping = mappings.at(weight->idx);
- ggml_backend_buffer_t buf_mmap = nullptr;
- if (bufs_mmap.count(weight->idx)) {
- buf_mmap = bufs_mmap.at(weight->idx);
- }
- uint8_t * data = (uint8_t *) mapping->addr + weight->offs;
- if (check_tensors) {
- validation_result.emplace_back(std::async(std::launch::async, [cur, data, n_size] {
- return std::make_pair(cur, ggml_validate_row_data(cur->type, data, n_size));
- }));
- }
- GGML_ASSERT(buf_mmap || cur->data); // either we have a buffer to allocate the tensor in, or it is already allocated
- if (buf_mmap && cur->data == nullptr) {
- ggml_backend_tensor_alloc(buf_mmap, cur, data);
- if (lmlocks) {
- const auto & lmlock = lmlocks->at(weight->idx);
- lmlock->grow_to(weight->offs + n_size);
- }
- auto & mmap_used = mmaps_used[weight->idx];
- mmap_used.first = std::min(mmap_used.first, weight->offs);
- mmap_used.second = std::max(mmap_used.second, weight->offs + n_size);
- } else {
- ggml_backend_tensor_set(cur, data, 0, n_size);
- }
- } else {
- GGML_ASSERT(weight->idx < files.size());
- const auto & file = files.at(weight->idx);
- if (ggml_backend_buffer_is_host(cur->buffer)) {
- file->seek(weight->offs, SEEK_SET);
- file->read_raw(cur->data, n_size);
- if (check_tensors) {
- validation_result.emplace_back(std::async(std::launch::async, [cur, n_size] {
- return std::make_pair(cur, ggml_validate_row_data(cur->type, cur->data, n_size));
- }));
- }
- } else {
- #if defined(GGML_USE_CUDA)
- // If cuda_backend is valid, load the tensor in chunks to pinned memory and upload the buffers asynchronously to the GPU.
- if (cuda_backend) {
- file->seek(weight->offs, SEEK_SET);
- size_t bytes_read = 0;
- while (bytes_read < n_size) {
- size_t read_iteration = std::min<size_t>(buffer_size, n_size - bytes_read);
- ggml_backend_event_synchronize(events[buffer_idx]);
- file->read_raw(host_ptrs[buffer_idx], read_iteration);
- ggml_backend_tensor_set_async(cuda_backend, cur, host_ptrs[buffer_idx], bytes_read, read_iteration);
- ggml_backend_event_record(events[buffer_idx]);
- bytes_read += read_iteration;
- ++buffer_idx;
- buffer_idx %= n_buffers;
- }
- }
- else
- #endif
- {
- read_buf.resize(n_size);
- file->seek(weight->offs, SEEK_SET);
- file->read_raw(read_buf.data(), n_size);
- ggml_backend_tensor_set(cur, read_buf.data(), 0, n_size);
- if (check_tensors && !ggml_validate_row_data(cur->type, read_buf.data(), n_size)) {
- throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur)));
- }
- }
- }
- }
- size_done += n_size;
- }
- #if defined(GGML_USE_CUDA)
- // free temporary resources used for async cuda uploads
- if (cuda_backend) {
- for (size_t idx = 0; idx < n_buffers; ++idx) {
- ggml_backend_event_synchronize(events[idx]);
- ggml_backend_event_free(events[idx]);
- ggml_backend_buffer_free(host_buffers[idx]);
- }
- ggml_backend_free(cuda_backend);
- }
- #endif
- // check validation results
- bool validation_failed = false;
- for (auto & future : validation_result) {
- auto result = future.get();
- if (!result.second) {
- LLAMA_LOG_ERROR("%s: tensor '%s' has invalid data\n", __func__, ggml_get_name(result.first));
- validation_failed = true;
- }
- }
- if (validation_failed) {
- throw std::runtime_error("found tensors with invalid data");
- }
- // check if this is the last call and do final cleanup
- if (size_done >= size_data) {
- // unmap offloaded tensors and metadata
- if (use_mmap) {
- for (uint32_t idx = 0; idx < mappings.size(); idx++) {
- const auto & mmap_used = mmaps_used.at(idx);
- auto & mapping = mappings.at(idx);
- mapping->unmap_fragment(0, mmap_used.first);
- if (mmap_used.second != 0) {
- mapping->unmap_fragment(mmap_used.second, mapping->size);
- }
- }
- }
- if (progress_callback) {
- // Even though the model is done loading, we still honor
- // cancellation since we need to free allocations.
- return progress_callback(1.0f, progress_callback_user_data);
- }
- }
- return true;
- }
- };
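- // Usage sketch (simplified, assumed driver code): the loader is typically constructed once
- // and then consumed by the llm_load_* helpers defined below:
- //   llama_model_loader ml(fname, /*use_mmap=*/true, /*check_tensors=*/false, nullptr);
- //   llm_load_arch   (ml, model);
- //   llm_load_hparams(ml, model);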
- template<>
- bool llama_model_loader::get_key(const enum llm_kv kid, enum llama_pooling_type & result, const bool required) {
- uint32_t tmp;
- const bool found = get_key(kid, tmp, required);
- if (found) {
- result = (enum llama_pooling_type) tmp;
- } else {
- result = LLAMA_POOLING_TYPE_UNSPECIFIED;
- }
- return found;
- }
- //
- // load LLaMA models
- //
- static const char * llama_model_arch_name(llm_arch arch) {
- auto it = LLM_ARCH_NAMES.find(arch);
- if (it == LLM_ARCH_NAMES.end()) {
- return "unknown";
- }
- return it->second;
- }
- static std::string llama_model_ftype_name(llama_ftype ftype) {
- if (ftype & LLAMA_FTYPE_GUESSED) {
- return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
- }
- switch (ftype) {
- case LLAMA_FTYPE_ALL_F32: return "all F32";
- case LLAMA_FTYPE_MOSTLY_F16: return "F16";
- case LLAMA_FTYPE_MOSTLY_BF16: return "BF16";
- case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0";
- case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1";
- case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0";
- case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1";
- case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0";
- case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K - Medium";
- case LLAMA_FTYPE_MOSTLY_Q2_K_S: return "Q2_K - Small";
- case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "Q3_K - Small";
- case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "Q3_K - Medium";
- case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "Q3_K - Large";
- case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "Q4_K - Small";
- case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "Q4_K - Medium";
- case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "Q5_K - Small";
- case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "Q5_K - Medium";
- case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K";
- case LLAMA_FTYPE_MOSTLY_TQ1_0: return "TQ1_0 - 1.69 bpw ternary";
- case LLAMA_FTYPE_MOSTLY_TQ2_0: return "TQ2_0 - 2.06 bpw ternary";
- case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return "IQ2_XXS - 2.0625 bpw";
- case LLAMA_FTYPE_MOSTLY_IQ2_XS: return "IQ2_XS - 2.3125 bpw";
- case LLAMA_FTYPE_MOSTLY_IQ2_S: return "IQ2_S - 2.5 bpw";
- case LLAMA_FTYPE_MOSTLY_IQ2_M: return "IQ2_M - 2.7 bpw";
- case LLAMA_FTYPE_MOSTLY_IQ3_XS: return "IQ3_XS - 3.3 bpw";
- case LLAMA_FTYPE_MOSTLY_IQ3_XXS: return "IQ3_XXS - 3.0625 bpw";
- case LLAMA_FTYPE_MOSTLY_IQ1_S: return "IQ1_S - 1.5625 bpw";
- case LLAMA_FTYPE_MOSTLY_IQ1_M: return "IQ1_M - 1.75 bpw";
- case LLAMA_FTYPE_MOSTLY_IQ4_NL: return "IQ4_NL - 4.5 bpw";
- case LLAMA_FTYPE_MOSTLY_IQ4_XS: return "IQ4_XS - 4.25 bpw";
- case LLAMA_FTYPE_MOSTLY_IQ3_S: return "IQ3_S - 3.4375 bpw";
- case LLAMA_FTYPE_MOSTLY_IQ3_M: return "IQ3_S mix - 3.66 bpw";
- case LLAMA_FTYPE_MOSTLY_Q4_0_4_4: return "Q4_0_4_4";
- case LLAMA_FTYPE_MOSTLY_Q4_0_4_8: return "Q4_0_4_8";
- case LLAMA_FTYPE_MOSTLY_Q4_0_8_8: return "Q4_0_8_8";
- default: return "unknown, may not work";
- }
- }
- static const char * llama_model_type_name(e_model type) {
- switch (type) {
- case MODEL_14M: return "14M";
- case MODEL_17M: return "17M";
- case MODEL_22M: return "22M";
- case MODEL_33M: return "33M";
- case MODEL_60M: return "60M";
- case MODEL_70M: return "70M";
- case MODEL_80M: return "80M";
- case MODEL_109M: return "109M";
- case MODEL_137M: return "137M";
- case MODEL_160M: return "160M";
- case MODEL_220M: return "220M";
- case MODEL_250M: return "250M";
- case MODEL_270M: return "270M";
- case MODEL_335M: return "335M";
- case MODEL_410M: return "410M";
- case MODEL_450M: return "450M";
- case MODEL_770M: return "770M";
- case MODEL_780M: return "780M";
- case MODEL_0_5B: return "0.5B";
- case MODEL_1B: return "1B";
- case MODEL_1_3B: return "1.3B";
- case MODEL_1_4B: return "1.4B";
- case MODEL_1_6B: return "1.6B";
- case MODEL_2B: return "2B";
- case MODEL_2_8B: return "2.8B";
- case MODEL_3B: return "3B";
- case MODEL_4B: return "4B";
- case MODEL_6B: return "6B";
- case MODEL_6_9B: return "6.9B";
- case MODEL_7B: return "7B";
- case MODEL_8B: return "8B";
- case MODEL_9B: return "9B";
- case MODEL_11B: return "11B";
- case MODEL_12B: return "12B";
- case MODEL_13B: return "13B";
- case MODEL_14B: return "14B";
- case MODEL_15B: return "15B";
- case MODEL_16B: return "16B";
- case MODEL_20B: return "20B";
- case MODEL_30B: return "30B";
- case MODEL_34B: return "34B";
- case MODEL_35B: return "35B";
- case MODEL_40B: return "40B";
- case MODEL_65B: return "65B";
- case MODEL_70B: return "70B";
- case MODEL_236B: return "236B";
- case MODEL_314B: return "314B";
- case MODEL_SMALL: return "0.1B";
- case MODEL_MEDIUM: return "0.4B";
- case MODEL_LARGE: return "0.8B";
- case MODEL_XL: return "1.5B";
- case MODEL_A1_7B: return "A1.7B";
- case MODEL_A2_7B: return "A2.7B";
- case MODEL_8x7B: return "8x7B";
- case MODEL_8x22B: return "8x22B";
- case MODEL_16x12B: return "16x12B";
- case MODEL_10B_128x3_66B: return "10B+128x3.66B";
- case MODEL_57B_A14B: return "57B.A14B";
- case MODEL_27B: return "27B";
- default: return "?B";
- }
- }
- static const char * llama_model_vocab_type_name(enum llama_vocab_type type) {
- switch (type) {
- case LLAMA_VOCAB_TYPE_NONE: return "no vocab";
- case LLAMA_VOCAB_TYPE_SPM: return "SPM";
- case LLAMA_VOCAB_TYPE_BPE: return "BPE";
- case LLAMA_VOCAB_TYPE_WPM: return "WPM";
- case LLAMA_VOCAB_TYPE_UGM: return "UGM";
- case LLAMA_VOCAB_TYPE_RWKV: return "RWKV";
- default: return "unknown";
- }
- }
- static void llm_load_arch(llama_model_loader & ml, llama_model & model) {
- model.arch = ml.get_arch();
- if (model.arch == LLM_ARCH_UNKNOWN) {
- throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
- }
- }
- static void llm_load_hparams(
- llama_model_loader & ml,
- llama_model & model) {
- auto & hparams = model.hparams;
- const gguf_context * ctx = ml.meta;
- // get metadata as string
- for (int i = 0; i < gguf_get_n_kv(ctx); i++) {
- enum gguf_type type = gguf_get_kv_type(ctx, i);
- if (type == GGUF_TYPE_ARRAY) {
- continue;
- }
- const char * name = gguf_get_key(ctx, i);
- const std::string value = gguf_kv_to_str(ctx, i);
- model.gguf_kv.emplace(name, value);
- }
- // get general kv
- ml.get_key(LLM_KV_GENERAL_NAME, model.name, false);
- // get hparams kv
- ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab);
- // everything past this point is not vocab-related
- if (hparams.vocab_only) {
- return;
- }
- ml.get_key(LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train);
- ml.get_key(LLM_KV_EMBEDDING_LENGTH, hparams.n_embd);
- ml.get_key(LLM_KV_BLOCK_COUNT, hparams.n_layer);
- ml.get_key(LLM_KV_EXPERT_COUNT, hparams.n_expert, false);
- ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false);
- GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS);
- GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert);
- if (hparams.n_expert > 0) {
- GGML_ASSERT(hparams.n_expert_used > 0);
- } else {
- GGML_ASSERT(hparams.n_expert_used == 0);
- }
- // zero-out the per-layer hparams
- std::fill(hparams.n_head_arr.begin(), hparams.n_head_arr.end(), 0);
- std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
- std::fill(hparams.n_ff_arr.begin(), hparams.n_ff_arr.end(), 0);
- std::fill(hparams.cross_attn_layers.begin(), hparams.cross_attn_layers.end(), -1);
- ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff_arr, hparams.n_layer);
- ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer);
- ml.get_arr(LLM_KV_ATTENTION_CROSS_ATTENTION_LAYERS, hparams.cross_attn_layers, false);
- // n_head_kv is optional, default to n_head
- hparams.n_head_kv_arr = hparams.n_head_arr;
- ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv_arr, hparams.n_layer, false);
- bool rope_finetuned = false;
- ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
- hparams.rope_finetuned = rope_finetuned;
- hparams.n_ctx_orig_yarn = hparams.n_ctx_train;
- ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_ctx_orig_yarn, false);
- // rope_freq_base (optional)
- hparams.rope_freq_base_train = 10000.0f;
- ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false);
- std::string rope_scaling("linear");
- ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false);
- hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
- GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED);
- // rope_freq_scale (inverse of the kv) is optional
- float ropescale = 0.0f;
- if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) {
- // try the old key name
- ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false);
- }
- hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
- ml.get_key(LLM_KV_ROPE_SCALING_ATTN_FACTOR, hparams.rope_attn_factor, false);
- // non-transformer models do not have attention heads
- if (hparams.n_head() > 0) {
- // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
- // gpt-j n_rot = rotary_dim
- hparams.n_embd_head_k = hparams.n_embd / hparams.n_head();
- ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k, false);
- hparams.n_embd_head_v = hparams.n_embd / hparams.n_head();
- ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false);
- // sanity check for n_rot (optional)
- hparams.n_rot = hparams.n_embd_head_k;
- ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);
- if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_MLLAMA || model.arch == LLM_ARCH_FALCON) {
- if (hparams.n_rot != hparams.n_embd_head_k) {
- throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd_head_k));
- }
- }
- } else {
- hparams.n_rot = 0;
- hparams.n_embd_head_k = 0;
- hparams.n_embd_head_v = 0;
- }
- // arch-specific KVs
- switch (model.arch) {
- case LLM_ARCH_LLAMA:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- if (hparams.n_expert == 8) {
- switch (hparams.n_layer) {
- case 32: model.type = e_model::MODEL_8x7B; break;
- case 56: model.type = e_model::MODEL_8x22B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } else {
- switch (hparams.n_layer) {
- case 16: model.type = e_model::MODEL_1B; break; // Llama 3.2 1B
- case 22: model.type = e_model::MODEL_1B; break;
- case 26: model.type = e_model::MODEL_3B; break;
- case 28: model.type = e_model::MODEL_3B; break; // Llama 3.2 3B
- // granite uses a vocab with len 49152
- case 32: model.type = hparams.n_vocab == 49152 ? e_model::MODEL_3B : (hparams.n_vocab < 40000 ? e_model::MODEL_7B : e_model::MODEL_8B); break;
- case 36: model.type = e_model::MODEL_8B; break; // granite
- case 40: model.type = e_model::MODEL_13B; break;
- case 48: model.type = e_model::MODEL_34B; break;
- case 60: model.type = e_model::MODEL_30B; break;
- case 80: model.type = hparams.n_head() == hparams.n_head_kv() ? e_model::MODEL_65B : e_model::MODEL_70B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- }
- } break;
- case LLM_ARCH_MLLAMA:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 40: model.type = e_model::MODEL_11B; break;
- case 100: model.type = e_model::MODEL_90B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_MINICPM:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 40: model.type = e_model::MODEL_2B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_MINICPM3:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
- ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
- switch (hparams.n_layer) {
- case 62: model.type = e_model::MODEL_4B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_GROK:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 64: model.type = e_model::MODEL_314B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_FALCON:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- switch (hparams.n_layer) {
- case 32: model.type = e_model::MODEL_7B; break;
- case 60: model.type = e_model::MODEL_40B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_BAICHUAN:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 32: model.type = e_model::MODEL_7B; break;
- case 40: model.type = e_model::MODEL_13B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- if (model.type == e_model::MODEL_13B) {
- // TODO: become GGUF KV parameter
- hparams.f_max_alibi_bias = 8.0f;
- }
- } break;
- case LLM_ARCH_STARCODER:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- switch (hparams.n_layer) {
- case 24: model.type = e_model::MODEL_1B; break;
- case 36: model.type = e_model::MODEL_3B; break;
- case 42: model.type = e_model::MODEL_7B; break;
- case 40: model.type = e_model::MODEL_15B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_REFACT:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 32: model.type = e_model::MODEL_1B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- // TODO: become GGUF KV parameter
- hparams.f_max_alibi_bias = 8.0f;
- } break;
- case LLM_ARCH_BERT:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
- ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
- ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
- switch (hparams.n_layer) {
- case 3:
- model.type = e_model::MODEL_17M; break; // bge-micro
- case 6:
- model.type = e_model::MODEL_22M; break; // MiniLM-L6
- case 12:
- switch (hparams.n_embd) {
- case 384: model.type = e_model::MODEL_33M; break; // MiniLM-L12, bge-small
- case 768: model.type = e_model::MODEL_109M; break; // bge-base
- } break;
- case 24:
- model.type = e_model::MODEL_335M; break; // bge-large
- }
- } break;
- case LLM_ARCH_JINA_BERT_V2:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
- ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
- ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
- hparams.f_max_alibi_bias = 8.0f;
- switch (hparams.n_layer) {
- case 4: model.type = e_model::MODEL_33M; break; // jina-embeddings-small
- case 12: model.type = e_model::MODEL_137M; break; // jina-embeddings-base
- }
- } break;
- case LLM_ARCH_NOMIC_BERT:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
- ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
- ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type);
- if (hparams.n_layer == 12 && hparams.n_embd == 768) {
- model.type = e_model::MODEL_137M;
- }
- } break;
- case LLM_ARCH_BLOOM:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- switch (hparams.n_layer) {
- case 24: model.type = e_model::MODEL_1B; break;
- case 30:
- switch (hparams.n_embd) {
- case 2560: model.type = e_model::MODEL_3B; break;
- case 4096: model.type = e_model::MODEL_7B; break;
- } break;
- }
- // TODO: should become a GGUF KV parameter
- hparams.f_max_alibi_bias = 8.0f;
- } break;
- case LLM_ARCH_MPT:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false);
- ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
- switch (hparams.n_layer) {
- case 32: model.type = e_model::MODEL_7B; break;
- case 48: model.type = e_model::MODEL_30B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_STABLELM:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- switch (hparams.n_layer) {
- case 24: model.type = e_model::MODEL_1B; break;
- case 32: model.type = e_model::MODEL_3B; break;
- case 40: model.type = e_model::MODEL_12B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_QWEN:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 32: model.type = e_model::MODEL_7B; break;
- case 40: model.type = e_model::MODEL_13B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_QWEN2:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
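- // several Qwen2 sizes share a layer count, so embedding width / head count are used to tell them apart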
- switch (hparams.n_layer) {
- case 24: model.type = hparams.n_embd == 1024 ? e_model::MODEL_0_5B : e_model::MODEL_1B; break;
- case 32: model.type = e_model::MODEL_7B; break;
- case 40: model.type = hparams.n_head() == 20 ? e_model::MODEL_4B : e_model::MODEL_13B; break;
- case 80: model.type = e_model::MODEL_70B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_QWEN2MOE:
- {
- ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
- ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false);
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 24: model.type = e_model::MODEL_A2_7B; break;
- case 28: model.type = e_model::MODEL_57B_A14B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_PHI2:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- switch (hparams.n_layer) {
- case 24: model.type = e_model::MODEL_1B; break;
- case 32: model.type = e_model::MODEL_3B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_PHI3:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 24: model.type = e_model::MODEL_1B; break;
- case 32: model.type = e_model::MODEL_3B; break;
- case 40: model.type = e_model::MODEL_14B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- // for backward compatibility; see: https://github.com/ggerganov/llama.cpp/pull/8931
- if ((hparams.n_layer == 32 || hparams.n_layer == 40) && hparams.n_ctx_train == 4096) {
- // default value for Phi-3-mini-4k-instruct and Phi-3-medium-4k-instruct
- hparams.n_swa = 2047;
- } else if (hparams.n_layer == 32 && hparams.n_head_kv(0) == 32 && hparams.n_ctx_train == 131072) {
- // default value for Phi-3-mini-128k-instruct
- hparams.n_swa = 262144;
- } else if (hparams.n_layer == 40 && hparams.n_ctx_train == 131072) {
- // default value for Phi-3-medium-128k-instruct
- hparams.n_swa = 131072;
- }
- bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
- if (!found_swa && hparams.n_swa == 0) {
- throw std::runtime_error("invalid value for sliding_window");
- }
- } break;
- case LLM_ARCH_PLAMO:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 40: model.type = e_model::MODEL_13B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_GPT2:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- switch (hparams.n_layer) {
- case 12: model.type = e_model::MODEL_SMALL; break;
- case 24: model.type = e_model::MODEL_MEDIUM; break;
- case 36: model.type = e_model::MODEL_LARGE; break;
- case 48: model.type = e_model::MODEL_XL; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_CODESHELL:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- switch (hparams.n_layer) {
- case 42: model.type = e_model::MODEL_7B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_ORION:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- switch (hparams.n_layer) {
- case 40: model.type = e_model::MODEL_14B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_INTERNLM2:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 32: model.type = e_model::MODEL_7B; break;
- case 48: model.type = e_model::MODEL_20B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_GEMMA:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 18: model.type = e_model::MODEL_2B; break;
- case 28: model.type = e_model::MODEL_7B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_GEMMA2:
- {
- hparams.n_swa = 4096; // default value for Gemma 2
- ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping, false);
- ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping, false);
- hparams.attn_soft_cap = true;
- switch (hparams.n_layer) {
- case 26: model.type = e_model::MODEL_2B; break;
- case 42: model.type = e_model::MODEL_9B; break;
- case 46: model.type = e_model::MODEL_27B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_STARCODER2:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- switch (hparams.n_layer) {
- case 30: model.type = e_model::MODEL_3B; break;
- case 32: model.type = e_model::MODEL_7B; break;
- case 40: model.type = e_model::MODEL_15B; break;
- case 52: model.type = e_model::MODEL_20B; break; // granite
- case 88: model.type = e_model::MODEL_34B; break; // granite
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_MAMBA:
- {
- ml.get_key(LLM_KV_SSM_CONV_KERNEL, hparams.ssm_d_conv);
- ml.get_key(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner);
- ml.get_key(LLM_KV_SSM_STATE_SIZE, hparams.ssm_d_state);
- ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
- ml.get_key(LLM_KV_SSM_DT_B_C_RMS, hparams.ssm_dt_b_c_rms, false);
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 24:
- switch (hparams.n_embd) {
- case 768: model.type = e_model::MODEL_SMALL; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- } break;
- case 48:
- switch (hparams.n_embd) {
- case 1024: model.type = e_model::MODEL_MEDIUM; break;
- case 1536: model.type = e_model::MODEL_LARGE; break;
- case 2048: model.type = e_model::MODEL_XL; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- } break;
- case 64:
- switch (hparams.n_embd) {
- case 2560: model.type = e_model::MODEL_3B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- } break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_XVERSE:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 32: model.type = e_model::MODEL_7B; break;
- case 40: model.type = e_model::MODEL_13B; break;
- case 80: model.type = e_model::MODEL_65B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_COMMAND_R:
- {
- ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- switch (hparams.n_layer) {
- case 40: model.type = e_model::MODEL_35B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_DBRX:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv);
- switch (hparams.n_layer) {
- case 40: model.type = e_model::MODEL_16x12B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_OLMO:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false);
- switch (hparams.n_layer) {
- case 22: model.type = e_model::MODEL_1B; break;
- case 32: model.type = e_model::MODEL_7B; break;
- case 80: model.type = e_model::MODEL_70B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_OLMOE:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 16: model.type = e_model::MODEL_A1_7B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_OPENELM:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 16: model.type = e_model::MODEL_270M; break;
- case 20: model.type = e_model::MODEL_450M; break;
- case 28: model.type = e_model::MODEL_1B; break;
- case 36: model.type = e_model::MODEL_3B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_GPTNEOX:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- ml.get_key(LLM_KV_USE_PARALLEL_RESIDUAL, hparams.use_par_res);
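- // several GPT-NeoX/Pythia sizes share a layer count, so the feed-forward width (n_ff) is used to disambiguate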
- switch (hparams.n_layer) {
- case 6:
- switch (hparams.n_ff()) {
- case 512: model.type = e_model::MODEL_14M; break;
- case 2048: model.type = e_model::MODEL_70M; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- } break;
- case 12:
- switch (hparams.n_ff()) {
- case 3072: model.type = e_model::MODEL_160M; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- } break;
- case 16:
- switch (hparams.n_ff()) {
- case 8192: model.type = e_model::MODEL_1B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- } break;
- case 24:
- switch (hparams.n_ff()) {
- case 4096: model.type = e_model::MODEL_410M; break;
- case 8192: model.type = e_model::MODEL_1_4B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- } break;
- case 32:
- switch (hparams.n_ff()) {
- case 10240: model.type = e_model::MODEL_2_8B; break;
- case 16384: model.type = e_model::MODEL_6_9B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- } break;
- case 36:
- switch (hparams.n_ff()) {
- case 20480: model.type = e_model::MODEL_12B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- } break;
- case 44:
- switch (hparams.n_ff()) {
- case 24576: model.type = e_model::MODEL_20B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- } break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_ARCTIC:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- if (hparams.n_expert == 128) {
- switch (hparams.n_layer) {
- case 35: model.type = e_model::MODEL_10B_128x3_66B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } else {
- model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_DEEPSEEK2:
- {
- bool is_lite = (hparams.n_layer == 27);
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
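- // the 27-layer Lite variant does not use a low-rank Q projection, so the Q LoRA rank is only read for the full model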
- if (!is_lite) {
- ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
- }
- ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
- ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
- ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
- ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
- ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul);
- switch (hparams.n_layer) {
- case 27: model.type = e_model::MODEL_16B; break;
- case 60: model.type = e_model::MODEL_236B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_CHATGLM:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 28: model.type = e_model::MODEL_6B; break;
- case 40: model.type = e_model::MODEL_9B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_BITNET:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 26: model.type = e_model::MODEL_3B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_T5:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
- uint32_t dec_start_token_id;
- if (ml.get_key(LLM_KV_DECODER_START_TOKEN_ID, dec_start_token_id, false)) {
- hparams.dec_start_token_id = dec_start_token_id;
- }
- switch (hparams.n_layer) {
- case 6: model.type = e_model::MODEL_60M; break; // t5-small
- case 8: model.type = e_model::MODEL_80M; break; // flan-t5-small
- case 12:
- switch (hparams.n_ff()) {
- case 3072: model.type = e_model::MODEL_220M; break; // t5-base
- case 2048: model.type = e_model::MODEL_250M; break; // flan-t5-base
- default: model.type = e_model::MODEL_UNKNOWN;
- } break;
- case 24:
- switch (hparams.n_ff()) {
- case 4096: model.type = e_model::MODEL_770M; break; // t5-large
- case 2816: model.type = e_model::MODEL_780M; break; // flan-t5-large
- case 16384: model.type = e_model::MODEL_3B; break; // t5-3b
- case 5120: model.type = e_model::MODEL_3B; break; // flan-t5-xl
- case 65536: model.type = e_model::MODEL_11B; break; // t5-11b
- case 10240: model.type = e_model::MODEL_11B; break; // flan-t5-xxl
- default: model.type = e_model::MODEL_UNKNOWN;
- } break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_T5ENCODER:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
- model.type = e_model::MODEL_UNKNOWN;
- } break;
- case LLM_ARCH_JAIS:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
- switch (hparams.n_layer) {
- case 24: model.type = e_model::MODEL_1_3B; break;
- case 40: model.type = e_model::MODEL_13B; break;
- /* TODO: add variants */
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_NEMOTRON:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- switch (hparams.n_layer) {
- case 32: model.type = e_model::MODEL_4B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_EXAONE:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- switch (hparams.n_layer) {
- case 32: model.type = e_model::MODEL_8B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_RWKV6:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
- ml.get_key(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size);
- ml.get_key(LLM_KV_TIME_MIX_EXTRA_DIM, hparams.time_mix_extra_dim);
- ml.get_key(LLM_KV_TIME_DECAY_EXTRA_DIM, hparams.time_decay_extra_dim);
- ml.get_key(LLM_KV_RESCALE_EVERY_N_LAYERS, hparams.rescale_every_n_layers, false);
- switch (hparams.n_layer) {
- case 24: model.type = e_model::MODEL_1_6B; break;
- case 32:
- switch (hparams.n_embd) {
- case 2560: model.type = e_model::MODEL_3B; break;
- case 4096: model.type = e_model::MODEL_7B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- } break;
- case 61: model.type = e_model::MODEL_14B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_GRANITE:
- case LLM_ARCH_GRANITE_MOE:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
- ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale);
- ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale);
- ml.get_key(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale);
- switch (hparams.n_layer) {
- case 32: model.type = e_model::MODEL_3B; break;
- case 40: model.type = e_model::MODEL_3B; break;
- // Add additional layer/vocab/etc checks here for other model sizes
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_CHAMELEON:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- hparams.f_norm_eps = 1e-5; // eps for qk-norm, torch default
- ml.get_key(LLM_KV_SWIN_NORM, hparams.swin_norm);
- switch (hparams.n_layer) {
- case 32: model.type = e_model::MODEL_7B; break;
- case 48: model.type = e_model::MODEL_34B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- case LLM_ARCH_SOLAR:
- {
- ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
- for (int i = 0; i < hparams.n_bskcn_arr.max_size(); ++i) {
- auto & bskcn = hparams.n_bskcn_arr.at(i);
- bskcn.fill(0);
- ml.get_key_or_arr(::format(LLM_KV_NAMES.at(LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION), LLM_ARCH_NAMES.at(ml.llm_kv.arch), i), bskcn, hparams.n_layer, false);
- }
- switch (hparams.n_layer) {
- case 64: model.type = e_model::MODEL_22B; break;
- default: model.type = e_model::MODEL_UNKNOWN;
- }
- } break;
- default: (void)0;
- }
- model.ftype = ml.ftype;
- if (hparams.f_max_alibi_bias > 0.0f) {
- hparams.use_alibi = true;
- }
- hparams.rope_type = llama_rope_type(&model);
- }
- static void llm_load_vocab(
- llama_model_loader & ml,
- llama_model & model) {
- auto & vocab = model.vocab;
- struct gguf_context * ctx = ml.meta;
- const auto kv = LLM_KV(model.arch);
- // determine vocab type
- {
- std::string tokenizer_model;
- std::string tokenizer_pre;
- ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_model);
- ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false);
- if (tokenizer_model == "no_vocab") {
- vocab.type = LLAMA_VOCAB_TYPE_NONE;
- // default special tokens
- vocab.special_bos_id = -1;
- vocab.special_eos_id = -1;
- vocab.special_unk_id = -1;
- vocab.special_sep_id = -1;
- vocab.special_pad_id = -1;
- vocab.special_cls_id = -1;
- vocab.special_mask_id = -1;
- vocab.linefeed_id = -1;
- // read vocab size from metadata
- if (!ml.get_key(LLM_KV_VOCAB_SIZE, vocab.n_vocab, false)) {
- vocab.n_vocab = 0;
- LLAMA_LOG_WARN("%s: there is no vocab_size in metadata, vocab.n_vocab will be set to %u\n", __func__, vocab.n_vocab);
- }
- return;
- }
- if (tokenizer_model == "llama") {
- vocab.type = LLAMA_VOCAB_TYPE_SPM;
- // default special tokens
- vocab.special_bos_id = 1;
- vocab.special_eos_id = 2;
- vocab.special_unk_id = 0;
- vocab.special_sep_id = -1;
- vocab.special_pad_id = -1;
- vocab.special_cls_id = -1;
- vocab.special_mask_id = -1;
- } else if (tokenizer_model == "bert") {
- vocab.type = LLAMA_VOCAB_TYPE_WPM;
- // default special tokens
- vocab.special_bos_id = -1;
- vocab.special_eos_id = -1;
- vocab.special_unk_id = 100;
- vocab.special_sep_id = 102;
- vocab.special_pad_id = 0;
- vocab.special_cls_id = 101;
- vocab.special_mask_id = 103;
- } else if (tokenizer_model == "gpt2") {
- vocab.type = LLAMA_VOCAB_TYPE_BPE;
- // read bpe merges and populate bpe ranks
- const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
- if (merges_keyidx == -1) {
- throw std::runtime_error("cannot find tokenizer merges in model file\n");
- }
- const int n_merges = gguf_get_arr_n(ctx, merges_keyidx);
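- // each merge is stored as a single string "first second"; the split searches for the space
- // starting at index 1 so that a first symbol consisting of a space is not lost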
- for (int i = 0; i < n_merges; i++) {
- const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
- GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0);
- std::string first;
- std::string second;
- const size_t pos = word.find(' ', 1);
- if (pos != std::string::npos) {
- first = word.substr(0, pos);
- second = word.substr(pos + 1);
- }
- vocab.bpe_ranks.emplace(std::make_pair(first, second), i);
- }
- // default special tokens
- vocab.special_bos_id = 11;
- vocab.special_eos_id = 11;
- vocab.special_unk_id = -1;
- vocab.special_sep_id = -1;
- vocab.special_pad_id = -1;
- vocab.special_cls_id = -1;
- vocab.special_mask_id = -1;
- } else if (tokenizer_model == "t5") {
- vocab.type = LLAMA_VOCAB_TYPE_UGM;
- // default special tokens
- vocab.special_bos_id = -1;
- vocab.special_eos_id = 1;
- vocab.special_unk_id = 2;
- vocab.special_sep_id = -1;
- vocab.special_pad_id = 0;
- vocab.special_cls_id = -1;
- vocab.special_mask_id = -1;
- const int precompiled_charsmap_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP).c_str());
- if (precompiled_charsmap_keyidx != -1) {
- size_t n_precompiled_charsmap = gguf_get_arr_n(ctx, precompiled_charsmap_keyidx);
- const char * precompiled_charsmap = (const char *) gguf_get_arr_data(ctx, precompiled_charsmap_keyidx);
- vocab.precompiled_charsmap.assign(precompiled_charsmap, precompiled_charsmap + n_precompiled_charsmap);
- #ifdef IS_BIG_ENDIAN
- // correct endianness of data in precompiled_charsmap binary blob
- uint32_t * xcda_blob_size = (uint32_t *) &vocab.precompiled_charsmap[0];
- *xcda_blob_size = __builtin_bswap32(*xcda_blob_size);
- assert(*xcda_blob_size + sizeof(uint32_t) < n_precompiled_charsmap);
- size_t xcda_array_size = *xcda_blob_size / sizeof(uint32_t);
- uint32_t * xcda_array = (uint32_t *) &vocab.precompiled_charsmap[sizeof(uint32_t)];
- for (size_t i = 0; i < xcda_array_size; ++i) {
- xcda_array[i] = __builtin_bswap32(xcda_array[i]);
- }
- #endif
- }
- } else if (tokenizer_model == "rwkv") {
- vocab.type = LLAMA_VOCAB_TYPE_RWKV;
- // default special tokens
- vocab.special_bos_id = -1;
- vocab.special_eos_id = -1;
- vocab.special_unk_id = -1;
- vocab.special_sep_id = -1;
- vocab.special_pad_id = -1;
- } else {
- throw std::runtime_error(format("unknown tokenizer: '%s'", tokenizer_model.c_str()));
- }
- // for now, only BPE models have pre-tokenizers
- if (vocab.type == LLAMA_VOCAB_TYPE_BPE) {
- vocab.tokenizer_add_space_prefix = false;
- vocab.tokenizer_clean_spaces = true;
- if (tokenizer_pre == "default") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
- } else if (
- tokenizer_pre == "llama3" ||
- tokenizer_pre == "llama-v3" ||
- tokenizer_pre == "llama-bpe") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_LLAMA3;
- vocab.tokenizer_ignore_merges = true;
- vocab.tokenizer_add_bos = true;
- } else if (
- tokenizer_pre == "deepseek-llm") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM;
- vocab.tokenizer_clean_spaces = false;
- } else if (
- tokenizer_pre == "deepseek-coder") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER;
- vocab.tokenizer_clean_spaces = false;
- } else if (
- tokenizer_pre == "falcon") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_FALCON;
- } else if (
- tokenizer_pre == "mpt") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MPT;
- } else if (
- tokenizer_pre == "starcoder") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STARCODER;
- } else if (
- tokenizer_pre == "gpt-2" ||
- tokenizer_pre == "phi-2" ||
- tokenizer_pre == "jina-es" ||
- tokenizer_pre == "jina-de" ||
- tokenizer_pre == "jina-v1-en" ||
- tokenizer_pre == "jina-v2-es" ||
- tokenizer_pre == "jina-v2-de" ||
- tokenizer_pre == "jina-v2-code") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2;
- } else if (
- tokenizer_pre == "refact") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_REFACT;
- } else if (
- tokenizer_pre == "command-r") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
- vocab.tokenizer_clean_spaces = false;
- } else if (
- tokenizer_pre == "qwen2") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2;
- vocab.tokenizer_clean_spaces = false;
- } else if (
- tokenizer_pre == "stablelm2") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STABLELM2;
- } else if (
- tokenizer_pre == "olmo") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO;
- } else if (
- tokenizer_pre == "dbrx") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
- } else if (
- tokenizer_pre == "smaug-bpe") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMAUG;
- } else if (
- tokenizer_pre == "poro-chat") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO;
- vocab.tokenizer_clean_spaces = false;
- } else if (
- tokenizer_pre == "chatglm-bpe") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHATGLM4;
- vocab.special_bos_id = -1;
- } else if (
- tokenizer_pre == "viking") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_VIKING;
- vocab.tokenizer_clean_spaces = false;
- } else if (
- tokenizer_pre == "jais") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS;
- } else if (
- tokenizer_pre == "tekken") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_TEKKEN;
- vocab.tokenizer_clean_spaces = false;
- vocab.tokenizer_ignore_merges = true;
- vocab.tokenizer_add_bos = true;
- } else if (
- tokenizer_pre == "smollm") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMOLLM;
- vocab.tokenizer_clean_spaces = false;
- } else if (
- tokenizer_pre == "codeshell") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CODESHELL;
- } else if (
- tokenizer_pre == "bloom") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_BLOOM;
- } else if (
- tokenizer_pre == "gpt3-finnish") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH;
- } else if (
- tokenizer_pre == "exaone") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_EXAONE;
- } else if (
- tokenizer_pre == "chameleon") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHAMELEON;
- vocab.tokenizer_add_bos = true;
- vocab.tokenizer_clean_spaces = false;
- } else {
- LLAMA_LOG_WARN("%s: missing or unrecognized pre-tokenizer type, using: 'default'\n", __func__);
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
- }
- } else if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
- vocab.tokenizer_add_space_prefix = true;
- vocab.tokenizer_clean_spaces = false;
- vocab.tokenizer_add_bos = true;
- vocab.tokenizer_add_eos = false;
- } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
- vocab.tokenizer_add_space_prefix = false;
- vocab.tokenizer_clean_spaces = true;
- vocab.tokenizer_add_bos = true;
- vocab.tokenizer_add_eos = false;
- } else if (vocab.type == LLAMA_VOCAB_TYPE_UGM) {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
- vocab.tokenizer_add_bos = false;
- vocab.tokenizer_add_eos = true;
- } else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
- vocab.tokenizer_add_space_prefix = false;
- vocab.tokenizer_clean_spaces = false;
- vocab.tokenizer_add_bos = false;
- vocab.tokenizer_add_eos = false;
- } else {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
- }
- ml.get_key(LLM_KV_TOKENIZER_ADD_PREFIX, vocab.tokenizer_add_space_prefix, false);
- ml.get_key(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, vocab.tokenizer_remove_extra_whitespaces, false);
- }
- const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
- if (token_idx == -1) {
- throw std::runtime_error("cannot find tokenizer vocab in model file\n");
- }
- const float * scores = nullptr;
- const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str());
- if (score_idx != -1) {
- scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
- }
- const int * toktypes = nullptr;
- const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str());
- if (toktype_idx != -1) {
- toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
- }
- const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
- vocab.n_vocab = n_vocab;
- vocab.id_to_token.resize(n_vocab);
- for (uint32_t i = 0; i < n_vocab; i++) {
- std::string word = gguf_get_arr_str(ctx, token_idx, i);
- //GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0);
- if (word.empty()) {
- LLAMA_LOG_WARN("%s: empty token at index %u\n", __func__, i);
- word = "[EMPTY_" + std::to_string(i) + "]";
- }
- vocab.token_to_id[word] = i;
- vocab.max_token_len = std::max(vocab.max_token_len, (int) word.size());
- auto & token_data = vocab.id_to_token[i];
- token_data.text = std::move(word);
- token_data.score = scores ? scores[i] : 0.0f;
- token_data.attr = LLAMA_TOKEN_ATTR_NORMAL;
- if (toktypes) { //TODO: remove, required until per token attributes are available from GGUF file
- switch(toktypes[i]) {
- case LLAMA_TOKEN_TYPE_UNKNOWN: token_data.attr = LLAMA_TOKEN_ATTR_UNKNOWN; break;
- case LLAMA_TOKEN_TYPE_UNUSED: token_data.attr = LLAMA_TOKEN_ATTR_UNUSED; break;
- case LLAMA_TOKEN_TYPE_NORMAL: token_data.attr = LLAMA_TOKEN_ATTR_NORMAL; break;
- case LLAMA_TOKEN_TYPE_CONTROL: token_data.attr = LLAMA_TOKEN_ATTR_CONTROL; break;
- case LLAMA_TOKEN_TYPE_USER_DEFINED: token_data.attr = LLAMA_TOKEN_ATTR_USER_DEFINED; break;
- case LLAMA_TOKEN_TYPE_BYTE: token_data.attr = LLAMA_TOKEN_ATTR_BYTE; break;
- case LLAMA_TOKEN_TYPE_UNDEFINED: token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED; break;
- default: token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED; break;
- }
- }
- }
- GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size());
- vocab.init_tokenizer();
- // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
- if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
- // For Fill-In-the-Middle (FIM)/infill models which were converted
- // prior to support of FIM special tokens in GGUF, the following
- // will allow those models to continue to work. The general names
- // of the known models are currently CodeLlama (LLM_ARCH_LLAMA) and
- // CodeGemma (LLM_ARCH_GEMMA). This can potentially be removed once
- // new versions of these models have been published.
- std::string gen_name;
- ml.get_key(LLM_KV_GENERAL_NAME, gen_name, false);
- std::transform(gen_name.begin(), gen_name.end(), gen_name.begin(),
- [](unsigned char c){ return std::tolower(c); });
- if (gen_name.find("code") != std::string::npos) {
- if (model.arch == LLM_ARCH_LLAMA
- && 32010 < vocab.id_to_token.size()
- && vocab.id_to_token[32007].text.find("<PRE>") != std::string::npos
- && vocab.id_to_token[32008].text.find("<SUF>") != std::string::npos
- && vocab.id_to_token[32009].text.find("<MID>") != std::string::npos
- && vocab.id_to_token[32010].text.find("<EOT>") != std::string::npos) {
- vocab.special_prefix_id = 32007;
- vocab.special_suffix_id = 32008;
- vocab.special_middle_id = 32009;
- vocab.special_eot_id = 32010;
- } else if (model.arch == LLM_ARCH_GEMMA
- && 107 < vocab.id_to_token.size()
- && vocab.id_to_token[67].text == "<|fim_prefix|>"
- && vocab.id_to_token[69].text == "<|fim_suffix|>"
- && vocab.id_to_token[68].text == "<|fim_middle|>"
- && vocab.id_to_token[107].text == "<end_of_turn>") {
- vocab.special_prefix_id = 67;
- vocab.special_suffix_id = 69;
- vocab.special_middle_id = 68;
- // TODO: this is not EOT, it is "file separator" token, needs fix
- // https://huggingface.co/google/codegemma-7b-it/blob/9b1d9231388358c04d90bd003458f5070d97db44/tokenizer_config.json#L565-L572
- //vocab.special_eot_id = 70;
- vocab.special_eot_id = 107;
- }
- }
- try {
- vocab.linefeed_id = llama_byte_to_token_impl(vocab, '\n');
- } catch (const std::exception & e) {
- LLAMA_LOG_WARN("%s: SPM vocabulary, but newline token not found: %s! Using special_pad_id instead.", __func__, e.what());
- vocab.linefeed_id = vocab.special_pad_id;
- }
- } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
- vocab.linefeed_id = vocab.special_pad_id;
- } else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) {
- const std::vector<int> ids = llama_tokenize_internal(vocab, "\n", false);
- GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
- vocab.linefeed_id = ids[0];
- } else {
- const std::vector<int> ids = llama_tokenize_internal(vocab, "\xC4\x8A", false); // U+010A
- //GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
- if (ids.empty()) {
- LLAMA_LOG_WARN("%s: model vocab missing newline token, using special_pad_id instead\n", __func__);
- vocab.linefeed_id = vocab.special_pad_id;
- } else {
- vocab.linefeed_id = ids[0];
- }
- }
- // special tokens
- {
- const std::vector<std::pair<enum llm_kv, int32_t &>> special_token_types = {
- { LLM_KV_TOKENIZER_BOS_ID, vocab.special_bos_id },
- { LLM_KV_TOKENIZER_EOS_ID, vocab.special_eos_id },
- { LLM_KV_TOKENIZER_UNK_ID, vocab.special_unk_id },
- { LLM_KV_TOKENIZER_SEP_ID, vocab.special_sep_id },
- { LLM_KV_TOKENIZER_PAD_ID, vocab.special_pad_id },
- { LLM_KV_TOKENIZER_CLS_ID, vocab.special_cls_id },
- { LLM_KV_TOKENIZER_MASK_ID, vocab.special_mask_id },
- { LLM_KV_TOKENIZER_PREFIX_ID, vocab.special_prefix_id },
- { LLM_KV_TOKENIZER_SUFFIX_ID, vocab.special_suffix_id },
- { LLM_KV_TOKENIZER_MIDDLE_ID, vocab.special_middle_id },
- { LLM_KV_TOKENIZER_EOT_ID, vocab.special_eot_id },
- { LLM_KV_TOKENIZER_EOM_ID, vocab.special_eom_id },
- };
- for (const auto & it : special_token_types) {
- const std::string & key = kv(std::get<0>(it));
- int32_t & id = std::get<1>(it);
- uint32_t new_id;
- if (!ml.get_key(std::get<0>(it), new_id, false)) {
- continue;
- }
- if (new_id >= vocab.id_to_token.size()) {
- LLAMA_LOG_WARN("%s: bad special token: '%s' = %ud, using default id %d\n",
- __func__, key.c_str(), new_id, id);
- } else {
- id = new_id;
- }
- }
- // Handle add_bos_token and add_eos_token
- {
- bool temp = true;
- if (ml.get_key(LLM_KV_TOKENIZER_ADD_BOS, temp, false)) {
- vocab.tokenizer_add_bos = temp;
- }
- if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) {
- vocab.tokenizer_add_eos = temp;
- }
- }
- // find EOT token: "<|eot_id|>", "<|im_end|>", "<end_of_turn>", etc.
- //
- // TODO: convert scripts should provide this token through the KV metadata LLAMA_KV_TOKENIZER_EOT_ID
- // for now, we apply this workaround to find the EOT token based on its text
- if (vocab.special_eot_id == -1) {
- for (const auto & t : vocab.token_to_id) {
- if (false
- // TODO: gemma "<end_of_turn>" is exported as a normal token, so the following check does not work
- // need to fix convert script
- //vocab.id_to_token[t.second].type == LLAMA_TOKEN_TYPE_CONTROL &&
- || t.first == "<|eot_id|>"
- || t.first == "<|im_end|>"
- || t.first == "<|end|>"
- || t.first == "<end_of_turn>"
- || t.first == "<|endoftext|>"
- || t.first == "<EOT>"
- ) {
- vocab.special_eot_id = t.second;
- if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
- LLAMA_LOG_WARN("%s: control-looking token: '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
- __func__, t.first.c_str());
- vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
- }
- break;
- }
- }
- }
- // find EOM token: "<|eom_id|>"
- //
- // TODO: convert scripts should provide this token through the KV metadata LLAMA_KV_TOKENIZER_EOM_ID
- // for now, we apply this workaround to find the EOM token based on its text
- if (vocab.special_eom_id == -1) {
- const auto & t = vocab.token_to_id.find("<|eom_id|>");
- if (t != vocab.token_to_id.end()) {
- vocab.special_eom_id = t->second;
- if ((vocab.id_to_token[t->second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
- LLAMA_LOG_WARN("%s: control-looking token: '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
- __func__, t->first.c_str());
- vocab.id_to_token[t->second].attr = LLAMA_TOKEN_ATTR_CONTROL;
- }
- }
- }
- // maintain a list of tokens that cause end-of-generation
- // this is currently determined based on the token text, which is obviously not ideal
- // ref: https://github.com/ggerganov/llama.cpp/issues/9606
- vocab.special_eog_ids.clear();
- for (const auto & t : vocab.token_to_id) {
- if (false
- || t.first == "<|eot_id|>"
- || t.first == "<|im_end|>"
- || t.first == "<|end|>"
- || t.first == "<end_of_turn>"
- || t.first == "<|endoftext|>"
- || t.first == "<|eom_id|>"
- || t.first == "<EOT>"
- ) {
- vocab.special_eog_ids.insert(t.second);
- if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
- LLAMA_LOG_WARN("%s: control-looking token: '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
- __func__, t.first.c_str());
- vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
- }
- }
- }
- if (vocab.special_eos_id != -1 && vocab.special_eog_ids.count(vocab.special_eos_id) == 0) {
- vocab.special_eog_ids.insert(vocab.special_eos_id);
- LLAMA_LOG_WARN("%s: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
- }
- if (vocab.special_eot_id != -1 && vocab.special_eog_ids.count(vocab.special_eot_id) == 0) {
- vocab.special_eog_ids.insert(vocab.special_eot_id);
- LLAMA_LOG_WARN("%s: special_eot_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
- }
- if (vocab.special_eom_id != -1 && vocab.special_eog_ids.count(vocab.special_eom_id) == 0) {
- vocab.special_eog_ids.insert(vocab.special_eom_id);
- LLAMA_LOG_WARN("%s: special_eom_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
- }
- }
- // build special tokens cache
- {
- for (llama_vocab::id id = 0; id < (llama_vocab::id)n_vocab; ++id) {
- if (vocab.id_to_token[id].attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_USER_DEFINED | LLAMA_TOKEN_ATTR_UNKNOWN)) {
- vocab.cache_special_tokens.push_back(id);
- }
- }
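- // sort by descending token length so that, when scanning text for special tokens,
- // longer tokens are matched before shorter ones that could be prefixes of them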
- std::sort(vocab.cache_special_tokens.begin(), vocab.cache_special_tokens.end(),
- [&] (const llama_vocab::id a, const llama_vocab::id b) {
- return vocab.id_to_token[a].text.size() > vocab.id_to_token[b].text.size();
- }
- );
- LLAMA_LOG_INFO("%s: special tokens cache size = %u\n", __func__, (uint32_t)vocab.cache_special_tokens.size());
- }
- // build token to piece cache
- {
- size_t size_cache = 0;
- std::vector<llama_vocab::token> cache_token_to_piece(n_vocab);
- for (uint32_t id = 0; id < n_vocab; ++id) {
- cache_token_to_piece[id] = llama_token_to_piece(&model, id, true);
- size_cache += cache_token_to_piece[id].size();
- }
- std::swap(vocab.cache_token_to_piece, cache_token_to_piece);
- LLAMA_LOG_INFO("%s: token to piece cache size = %.4f MB\n", __func__, size_cache / 1024.0 / 1024.0);
- }
- // Handle per token attributes
- //NOTE: Each model customizes per token attributes.
- //NOTE: Per token attributes are missing from the GGUF file.
- //TODO: Extract attributes from GGUF file.
- {
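- // returns true if any of the given substrings occurs in str
- // (find() returns a position strictly less than npos when found, so "< npos" is equivalent to "!= npos")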
- auto _contains_any = [] (const std::string &str, const std::vector<std::string> &substrs) -> bool {
- for (auto substr : substrs) {
- if (str.find(substr) < std::string::npos) {
- return true;
- }
- }
- return false;
- };
- auto _set_tokenid_attr = [&] (const llama_vocab::id id, llama_token_attr attr, bool value) {
- uint32_t current = vocab.id_to_token.at(id).attr;
- current = value ? (current | attr) : (current & ~attr);
- vocab.id_to_token[id].attr = (llama_token_attr) current;
- };
- auto _set_token_attr = [&] (const std::string & token, llama_token_attr attr, bool value) {
- _set_tokenid_attr(vocab.token_to_id.at(token), attr, value);
- };
- std::string model_name;
- std::string tokenizer_pre;
- ml.get_key(LLM_KV_GENERAL_NAME, model_name, false);
- ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false);
- // model name to lowercase
- std::transform(model_name.begin(), model_name.end(), model_name.begin(),
- [] (const std::string::value_type x) {
- return std::tolower(x);
- }
- );
- // set attributes by model/tokenizer name
- if (_contains_any(tokenizer_pre, {"jina-v2-de", "jina-v2-es", "jina-v2-code"})) {
- _set_token_attr("<mask>", LLAMA_TOKEN_ATTR_LSTRIP, true);
- } else if (_contains_any(model_name, {"phi-3", "phi3"})) {
- for (auto id : vocab.cache_special_tokens) {
- _set_tokenid_attr(id, LLAMA_TOKEN_ATTR_RSTRIP, true);
- }
- for (auto token : {"</s>"}) {
- _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, true);
- }
- for (auto token : {"<unk>", "<s>", "<|endoftext|>"}) {
- _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, false);
- }
- }
- }
- }
- static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
- const auto & hparams = model.hparams;
- const auto & vocab = model.vocab;
- const char * rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);
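- // formats a per-layer hyperparameter: a single value if it is constant across layers, otherwise a list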
- auto print_f = [](const std::function<uint32_t(uint32_t)> & f, uint32_t n) {
- bool is_var = false;
- std::vector<uint32_t> v;
- for (uint32_t i = 0; i < n; ++i) {
- v.push_back(f(i));
- if (v[i] != v[0]) {
- is_var = true;
- }
- }
- std::stringstream ss;
- if (is_var) {
- ss << "[";
- for (uint32_t i = 0; i < n; ++i) {
- ss << v[i];
- if (i < n - 1) {
- ss << ", ";
- }
- }
- ss << "]";
- } else {
- ss << v[0];
- }
- return ss.str();
- };
- // hparams
- LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml.fver));
- LLAMA_LOG_INFO("%s: arch = %s\n", __func__, LLM_ARCH_NAMES.at(model.arch));
- LLAMA_LOG_INFO("%s: vocab type = %s\n", __func__, llama_model_vocab_type_name(vocab.type));
- LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
- LLAMA_LOG_INFO("%s: n_merges = %u\n", __func__, (int) vocab.bpe_ranks.size());
- LLAMA_LOG_INFO("%s: vocab_only = %d\n", __func__, hparams.vocab_only);
- if (!hparams.vocab_only) {
- LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
- LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
- LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
- LLAMA_LOG_INFO("%s: n_head = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_head(il); }, hparams.n_layer).c_str());
- LLAMA_LOG_INFO("%s: n_head_kv = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_head_kv(il); }, hparams.n_layer).c_str());
- LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot);
- LLAMA_LOG_INFO("%s: n_swa = %u\n", __func__, hparams.n_swa);
- LLAMA_LOG_INFO("%s: n_embd_head_k = %u\n", __func__, hparams.n_embd_head_k);
- LLAMA_LOG_INFO("%s: n_embd_head_v = %u\n", __func__, hparams.n_embd_head_v);
- LLAMA_LOG_INFO("%s: n_gqa = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_gqa(il); }, hparams.n_layer).c_str());
- LLAMA_LOG_INFO("%s: n_embd_k_gqa = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_embd_k_gqa(il); }, hparams.n_layer).c_str());
- LLAMA_LOG_INFO("%s: n_embd_v_gqa = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_embd_v_gqa(il); }, hparams.n_layer).c_str());
- LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_eps);
- LLAMA_LOG_INFO("%s: f_norm_rms_eps = %.1e\n", __func__, hparams.f_norm_rms_eps);
- LLAMA_LOG_INFO("%s: f_clamp_kqv = %.1e\n", __func__, hparams.f_clamp_kqv);
- LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n", __func__, hparams.f_max_alibi_bias);
- LLAMA_LOG_INFO("%s: f_logit_scale = %.1e\n", __func__, hparams.f_logit_scale);
- LLAMA_LOG_INFO("%s: n_ff = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_ff(il); }, hparams.n_layer).c_str());
- LLAMA_LOG_INFO("%s: n_expert = %u\n", __func__, hparams.n_expert);
- LLAMA_LOG_INFO("%s: n_expert_used = %u\n", __func__, hparams.n_expert_used);
- LLAMA_LOG_INFO("%s: causal attn = %d\n", __func__, hparams.causal_attn);
- LLAMA_LOG_INFO("%s: pooling type = %d\n", __func__, hparams.pooling_type);
- LLAMA_LOG_INFO("%s: rope type = %d\n", __func__, hparams.rope_type);
- LLAMA_LOG_INFO("%s: rope scaling = %s\n", __func__, rope_scaling_type);
- LLAMA_LOG_INFO("%s: freq_base_train = %.1f\n", __func__, hparams.rope_freq_base_train);
- LLAMA_LOG_INFO("%s: freq_scale_train = %g\n", __func__, hparams.rope_freq_scale_train);
- LLAMA_LOG_INFO("%s: n_ctx_orig_yarn = %u\n", __func__, hparams.n_ctx_orig_yarn);
- LLAMA_LOG_INFO("%s: rope_finetuned = %s\n", __func__, hparams.rope_finetuned ? "yes" : "unknown");
- LLAMA_LOG_INFO("%s: ssm_d_conv = %u\n", __func__, hparams.ssm_d_conv);
- LLAMA_LOG_INFO("%s: ssm_d_inner = %u\n", __func__, hparams.ssm_d_inner);
- LLAMA_LOG_INFO("%s: ssm_d_state = %u\n", __func__, hparams.ssm_d_state);
- LLAMA_LOG_INFO("%s: ssm_dt_rank = %u\n", __func__, hparams.ssm_dt_rank);
- LLAMA_LOG_INFO("%s: ssm_dt_b_c_rms = %d\n", __func__, hparams.ssm_dt_b_c_rms);
- }
- LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
- LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype).c_str());
- if (ml.n_elements >= 1e12) {
- LLAMA_LOG_INFO("%s: model params = %.2f T\n", __func__, ml.n_elements*1e-12);
- } else if (ml.n_elements >= 1e9) {
- LLAMA_LOG_INFO("%s: model params = %.2f B\n", __func__, ml.n_elements*1e-9);
- } else if (ml.n_elements >= 1e6) {
- LLAMA_LOG_INFO("%s: model params = %.2f M\n", __func__, ml.n_elements*1e-6);
- } else {
- LLAMA_LOG_INFO("%s: model params = %.2f K\n", __func__, ml.n_elements*1e-3);
- }
- if (ml.n_bytes < GiB) {
- LLAMA_LOG_INFO("%s: model size = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
- } else {
- LLAMA_LOG_INFO("%s: model size = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
- }
- // general kv
- LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, model.name.c_str());
- // special tokens
- if (vocab.special_bos_id != -1) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); }
- if (vocab.special_eos_id != -1) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); }
- if (vocab.special_unk_id != -1) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); }
- if (vocab.special_sep_id != -1) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); }
- if (vocab.special_pad_id != -1) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); }
- if (vocab.special_cls_id != -1) { LLAMA_LOG_INFO( "%s: CLS token = %d '%s'\n", __func__, vocab.special_cls_id, vocab.id_to_token[vocab.special_cls_id].text.c_str() ); }
- if (vocab.special_mask_id != -1) { LLAMA_LOG_INFO( "%s: MASK token = %d '%s'\n", __func__, vocab.special_mask_id, vocab.id_to_token[vocab.special_mask_id].text.c_str() ); }
- if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
- if (vocab.special_prefix_id != -1) { LLAMA_LOG_INFO( "%s: PRE token = %d '%s'\n", __func__, vocab.special_prefix_id, vocab.id_to_token[vocab.special_prefix_id].text.c_str() ); }
- if (vocab.special_suffix_id != -1) { LLAMA_LOG_INFO( "%s: SUF token = %d '%s'\n", __func__, vocab.special_suffix_id, vocab.id_to_token[vocab.special_suffix_id].text.c_str() ); }
- if (vocab.special_middle_id != -1) { LLAMA_LOG_INFO( "%s: MID token = %d '%s'\n", __func__, vocab.special_middle_id, vocab.id_to_token[vocab.special_middle_id].text.c_str() ); }
- if (vocab.special_eot_id != -1) { LLAMA_LOG_INFO( "%s: EOT token = %d '%s'\n", __func__, vocab.special_eot_id, vocab.id_to_token[vocab.special_eot_id].text.c_str() ); }
- if (vocab.special_eom_id != -1) { LLAMA_LOG_INFO( "%s: EOM token = %d '%s'\n", __func__, vocab.special_eom_id, vocab.id_to_token[vocab.special_eom_id].text.c_str() ); }
- for (const auto & id : vocab.special_eog_ids) {
- LLAMA_LOG_INFO( "%s: EOG token = %d '%s'\n", __func__, id, vocab.id_to_token[id].text.c_str() );
- }
- LLAMA_LOG_INFO("%s: max token length = %d\n", __func__, vocab.max_token_len);
- if (model.arch == LLM_ARCH_DEEPSEEK2) {
- LLAMA_LOG_INFO("%s: n_layer_dense_lead = %d\n", __func__, hparams.n_layer_dense_lead);
- LLAMA_LOG_INFO("%s: n_lora_q = %d\n", __func__, hparams.n_lora_q);
- LLAMA_LOG_INFO("%s: n_lora_kv = %d\n", __func__, hparams.n_lora_kv);
- LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
- LLAMA_LOG_INFO("%s: n_expert_shared = %d\n", __func__, hparams.n_expert_shared);
- LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n", __func__, hparams.expert_weights_scale);
- LLAMA_LOG_INFO("%s: rope_yarn_log_mul = %.4f\n", __func__, hparams.rope_yarn_log_mul);
- }
- if (model.arch == LLM_ARCH_QWEN2MOE) {
- LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
- LLAMA_LOG_INFO("%s: n_ff_shexp = %d\n", __func__, hparams.n_ff_shexp);
- }
- if (model.arch == LLM_ARCH_GRANITE || model.arch == LLM_ARCH_GRANITE_MOE) {
- LLAMA_LOG_INFO("%s: f_embedding_scale = %f\n", __func__, hparams.f_embedding_scale);
- LLAMA_LOG_INFO("%s: f_residual_scale = %f\n", __func__, hparams.f_residual_scale);
- LLAMA_LOG_INFO("%s: f_attention_scale = %f\n", __func__, hparams.f_attention_scale);
- }
- }
- // Returns false if cancelled by progress_callback
- static bool llm_load_tensors(
- llama_model_loader & ml,
- llama_model & model,
- int n_gpu_layers,
- enum llama_split_mode split_mode,
- int main_gpu,
- const float * tensor_split,
- bool use_mlock,
- llama_progress_callback progress_callback,
- void * progress_callback_user_data) {
- auto & hparams = model.hparams;
- model.split_mode = split_mode;
- model.main_gpu = main_gpu;
- model.n_gpu_layers = n_gpu_layers;
- const int n_layer = hparams.n_layer;
- const int i_gpu_start = std::max((int) hparams.n_layer - n_gpu_layers, (int) 0);
- bool use_mmap_buffer = true;
- // there is very little benefit to offloading the input layer, so always keep it on the CPU
- model.buft_input = llama_default_buffer_type_cpu(true);
- //model.buft_input = llama_default_buffer_type_offload(main_gpu);
- model.buft_layer.resize(n_layer);
- // assign cpu layers
- for (int i = 0; i < i_gpu_start; ++i) {
- model.buft_layer[i] = llama_default_buffer_type_cpu(true);
- }
- if (split_mode == LLAMA_SPLIT_MODE_LAYER) {
- // calculate the split points
- int device_count = llama_get_device_count(model);
- bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + device_count, [](float x) { return x == 0.0f; });
- std::vector<float> splits(device_count);
- if (all_zero) {
- // default split, by free memory
- for (int i = 0; i < device_count; ++i) {
- splits[i] = llama_get_device_memory(model, i);
- }
- } else {
- std::copy(tensor_split, tensor_split + device_count, splits.begin());
- }
- // sum and normalize the splits to get the split points
- float split_sum = 0.0f;
- for (int i = 0; i < device_count; ++i) {
- split_sum += splits[i];
- splits[i] = split_sum;
- }
- for (int i = 0; i < device_count; ++i) {
- splits[i] /= split_sum;
- }
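- // splits[] now holds cumulative fractions in (0, 1]; splits[i] is the upper bound of device i's share of the layers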
- // assign the repeating layers to the devices according to the splits
- int act_gpu_layers = std::min(n_gpu_layers, (int)n_layer + 1);
- for (int i = i_gpu_start; i < n_layer; ++i) {
- int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(i - i_gpu_start)/act_gpu_layers) - splits.begin();
- model.buft_layer[i] = llama_default_buffer_type_offload(model, layer_gpu);
- }
- // assign the output layer
- if (n_gpu_layers > n_layer) {
- int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(act_gpu_layers - 1)/act_gpu_layers) - splits.begin();
- model.buft_output = llama_default_buffer_type_offload(model, layer_gpu);
- } else {
- model.buft_output = llama_default_buffer_type_cpu(true);
- }
- } else {
- ggml_backend_buffer_type_t split_buft;
- if (split_mode == LLAMA_SPLIT_MODE_ROW) {
- split_buft = llama_default_buffer_type_split(model, main_gpu, tensor_split);
- } else {
- // LLAMA_SPLIT_MODE_NONE or LLAMA_SPLIT_MODE_LAYER in backends where it is not supported
- split_buft = llama_default_buffer_type_offload(model, main_gpu);
- }
- // assign the repeating layers
- for (int i = i_gpu_start; i < n_layer; ++i) {
- model.buft_layer[i] = {
- split_buft,
- llama_default_buffer_type_offload(model, main_gpu)
- };
- }
- // assign the output layer
- if (n_gpu_layers > n_layer) {
- model.buft_output = {
- split_buft,
- llama_default_buffer_type_offload(model, main_gpu)
- };
- } else {
- model.buft_output = llama_default_buffer_type_cpu(true);
- }
- }
- // count used buffer types
- std::map<ggml_backend_buffer_type_t, int> buft_layer_count;
- buft_layer_count[model.buft_input.buft]++;
- buft_layer_count[model.buft_input.buft_matrix]++;
- buft_layer_count[model.buft_output.buft]++;
- buft_layer_count[model.buft_output.buft_matrix]++;
- for (int i = 0; i < n_layer; ++i) {
- buft_layer_count[model.buft_layer[i].buft]++;
- buft_layer_count[model.buft_layer[i].buft_matrix]++;
- }
- // create one context per buffer type
- size_t ctx_size = ggml_tensor_overhead()*(ml.n_tensors + 1); // +1 for models where tok_embd is duplicated as output
- // for moe merged tensors
- ctx_size += ggml_tensor_overhead()*n_layer*3;
- std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
- for (auto & it : buft_layer_count) {
- struct ggml_init_params params = {
- /*.mem_size =*/ ctx_size,
- /*.mem_buffer =*/ NULL,
- /*.no_alloc =*/ true,
- };
- ggml_context * ctx = ggml_init(params);
- if (!ctx) {
- throw std::runtime_error(format("failed to create context"));
- }
- ctx_map[it.first] = ctx;
- model.ctxs.push_back(ctx);
- }
- LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, model.ctxs.size()*ctx_size/1024.0/1024.0);
- // create tensors for the weights
- {
- // note: cast to int64_t since we will use these for the tensor dimensions
- const int64_t n_head = hparams.n_head();
- const int64_t n_head_kv = hparams.n_head_kv();
- const int64_t n_embd = hparams.n_embd;
- const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
- const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();
- const int64_t n_embd_head_k = hparams.n_embd_head_k;
- const int64_t n_embd_head_v = hparams.n_embd_head_v;
- const int64_t n_ff = hparams.n_ff();
- const int64_t n_embd_gqa = n_embd_v_gqa;
- const int64_t n_vocab = hparams.n_vocab;
- const int64_t n_vocab_type = hparams.n_vocab_type;
- const int64_t n_rot = hparams.n_rot;
- const int64_t n_expert = hparams.n_expert;
- const int64_t n_expert_used = hparams.n_expert_used;
- const int64_t n_ctx_train = hparams.n_ctx_train;
- if (n_expert > 0 && hparams.n_expert_used == 0) {
- throw std::runtime_error("model has expert layers but no expert layers are used");
- }
- ggml_context * ctx_input = ctx_map.at(model.buft_input.buft);
- ggml_context * ctx_output = ctx_map.at(model.buft_output.buft);
- ggml_context * ctx_output_split = ctx_map.at(model.buft_output.buft_matrix);
- auto ctx_for_layer = [&](int i) { return ctx_map.at(model.buft_layer[i].buft); };
- auto ctx_for_layer_split = [&](int i) { return ctx_map.at(model.buft_layer[i].buft_matrix); };
- model.layers.resize(n_layer);
- const auto tn = LLM_TN(model.arch);
- switch (model.arch) {
- case LLM_ARCH_LLAMA:
- case LLM_ARCH_REFACT:
- case LLM_ARCH_MINICPM:
- case LLM_ARCH_GRANITE:
- case LLM_ARCH_GRANITE_MOE:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // if output is NULL, init from the input tok embed
- if (model.output == NULL) {
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
- }
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd});
- // optional bias tensors
- layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.rope_freqs = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ROPE_FREQS, "weight"), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
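- // rope_freqs is stored once in the file (its name carries no layer index); layers other than
- // the first mark it TENSOR_DUPLICATED so the same data can back every layer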
- if (n_expert == 0) {
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- // optional MLP bias
- layer.ffn_gate_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
- } else {
- layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});
- layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, llama_model_loader::TENSOR_NOT_REQUIRED);
- if (layer.ffn_gate_exps) {
- layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert});
- layer.ffn_up_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert});
- } else {
- // merge the split per-expert tensors into a single 3D tensor for compatibility with older models
- // this requires disabling mmap, since the expert data has to be copied into the merged tensor
- use_mmap_buffer = false;
- ggml_type type_gate = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, 0).c_str())->type;
- ggml_type type_down = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, 0).c_str())->type;
- ggml_type type_up = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, 0).c_str())->type;
- layer.ffn_gate_exps = ggml_new_tensor_3d(ctx_split, type_gate, n_embd, n_ff, n_expert);
- layer.ffn_down_exps = ggml_new_tensor_3d(ctx_split, type_down, n_ff, n_embd, n_expert);
- layer.ffn_up_exps = ggml_new_tensor_3d(ctx_split, type_up, n_embd, n_ff, n_expert);
- ggml_set_name(layer.ffn_gate_exps, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i).c_str());
- ggml_set_name(layer.ffn_down_exps, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i).c_str());
- ggml_set_name(layer.ffn_up_exps, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i).c_str());
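- // nb[2] is the per-expert stride in bytes of the merged 3D tensors, so expert x starts at byte offset nb[2]*x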
- for (uint32_t x = 0; x < n_expert; ++x) {
- // the individual experts are loaded into a view of the merged tensor
- ml.create_tensor_as_view(ctx_split, layer.ffn_gate_exps, tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, x), { n_embd, n_ff }, layer.ffn_gate_exps->nb[2]*x);
- ml.create_tensor_as_view(ctx_split, layer.ffn_down_exps, tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, x), { n_ff, n_embd }, layer.ffn_down_exps->nb[2]*x);
- ml.create_tensor_as_view(ctx_split, layer.ffn_up_exps, tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, x), { n_embd, n_ff }, layer.ffn_up_exps->nb[2]*x);
- }
- }
- }
- }
- } break;
- case LLM_ARCH_MINICPM3:
- {
- const int64_t n_embd_head_qk_rope = hparams.n_rot;
- const int64_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
- const int64_t q_lora_rank = hparams.n_lora_q;
- const int64_t kv_lora_rank = hparams.n_lora_kv;
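- // each attention head is split into a RoPE'd part (n_embd_head_qk_rope) and a non-RoPE'd part,
- // and Q/KV pass through low-rank projections of rank q_lora_rank / kv_lora_rank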
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // if output is NULL, init from the input tok embed
- if (model.output == NULL) {
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
- }
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_q_a_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank});
- layer.attn_kv_a_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank});
- layer.wq_a = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank});
- layer.wq_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, n_head * n_embd_head_k});
- layer.wkv_a_mqa = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + (n_embd_head_qk_rope)});
- layer.wkv_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_KV_B, "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_head * ( n_embd_head_v), n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.rope_long = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight"), { n_embd_head_qk_rope/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
- layer.rope_short = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight"), { n_embd_head_qk_rope/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
- }
- } break;
- case LLM_ARCH_MLLAMA:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab+8});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // if output is NULL, init from the input tok embed
- if (model.output == NULL) {
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
- }
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- if (hparams.cross_attention_layers(i)) {
- layer.cross_attn_k_norm = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_K_NORM, "weight", i), {128});
- layer.cross_attn_k_proj = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_K_PROJ, "weight", i), {n_embd, 1024});
- layer.cross_attn_o_proj = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_O_PROJ, "weight", i), {n_embd, n_embd});
- layer.cross_attn_q_norm = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_Q_NORM, "weight", i), {128});
- layer.cross_attn_q_proj = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_Q_PROJ, "weight", i), {n_embd, n_embd});
- layer.cross_attn_v_proj = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_V_PROJ, "weight", i), {n_embd, 1024});
- layer.cross_attn_attn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_ATTN_GATE, i), {1});
- layer.cross_attn_mlp_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_CROSS_ATTN_MLP_GATE, i), {1});
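- // single-element gates that scale the cross-attention and MLP contributions of these layers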
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- } else {
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.rope_freqs = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ROPE_FREQS, "weight"), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- }
- } break;
- case LLM_ARCH_GROK:
- {
- if (n_expert == 0) {
- throw std::runtime_error("Grok model cannot have zero experts");
- }
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // if output is NULL, init from the input tok embed
- if (model.output == NULL) {
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
- }
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.attn_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});
- layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, llama_model_loader::TENSOR_NOT_REQUIRED);
- if (layer.ffn_gate_exps) {
- layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert});
- layer.ffn_up_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert});
- } else {
- // merge the split per-expert tensors into a single 3D tensor for compatibility with older models
- // this requires disabling mmap, since the expert data has to be copied into the merged tensor
- use_mmap_buffer = false;
- ggml_type type_gate = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, 0).c_str())->type;
- ggml_type type_down = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, 0).c_str())->type;
- ggml_type type_up = ml.require_tensor_meta(tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, 0).c_str())->type;
- layer.ffn_gate_exps = ggml_new_tensor_3d(ctx_split, type_gate, n_embd, n_ff, n_expert);
- layer.ffn_down_exps = ggml_new_tensor_3d(ctx_split, type_down, n_ff, n_embd, n_expert);
- layer.ffn_up_exps = ggml_new_tensor_3d(ctx_split, type_up, n_embd, n_ff, n_expert);
- ggml_set_name(layer.ffn_gate_exps, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i).c_str());
- ggml_set_name(layer.ffn_down_exps, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i).c_str());
- ggml_set_name(layer.ffn_up_exps, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i).c_str());
- for (uint32_t x = 0; x < n_expert; ++x) {
- // the individual experts are loaded into a view of the merged tensor
- ml.create_tensor_as_view(ctx_split, layer.ffn_gate_exps, tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, x), { n_embd, n_ff }, layer.ffn_gate_exps->nb[2]*x);
- ml.create_tensor_as_view(ctx_split, layer.ffn_down_exps, tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, x), { n_ff, n_embd }, layer.ffn_down_exps->nb[2]*x);
- ml.create_tensor_as_view(ctx_split, layer.ffn_up_exps, tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, x), { n_embd, n_ff }, layer.ffn_up_exps->nb[2]*x);
- }
- }
- layer.layer_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
- }
- } break;
- case LLM_ARCH_DBRX:
- {
- if (n_expert == 0) {
- throw std::runtime_error("DBRX model cannot have zero experts");
- }
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.attn_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd});
- layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});
- layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert});
- layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert});
- layer.ffn_up_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert});
- }
- } break;
- case LLM_ARCH_BAICHUAN:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- case LLM_ARCH_FALCON:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- if (!model.output) {
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // needs to be on GPU
- }
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
- layer.attn_norm_2 = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.attn_norm_2_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- case LLM_ARCH_STARCODER:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- if (!model.output) {
- // needs to be on GPU
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
- }
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
- layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
- layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
- layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
- }
- } break;
- case LLM_ARCH_BERT:
- case LLM_ARCH_NOMIC_BERT:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- model.type_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_vocab_type});
- if (model.arch == LLM_ARCH_BERT) {
- model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train});
- model.cls = ml.create_tensor(ctx_output, tn(LLM_TENSOR_CLS, "weight"), {n_embd, n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- model.cls_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_CLS, "bias"), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- model.cls_out = ml.create_tensor(ctx_output, tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, 1}, llama_model_loader::TENSOR_NOT_REQUIRED);
- model.cls_out_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_CLS_OUT, "bias"), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
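- // cls/cls_out are optional pooling and classification heads, present only in some checkpoints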
- }
- model.tok_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd});
- model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd});
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- if (model.arch == LLM_ARCH_BERT) {
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
- } else {
- layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
- }
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.attn_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd});
- layer.attn_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i), {n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
- if (model.arch == LLM_ARCH_BERT) {
- layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
- layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
- layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
- } else {
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- }
- layer.layer_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
- layer.layer_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd});
- }
- } break;
- case LLM_ARCH_JINA_BERT_V2:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // word_embeddings
- model.type_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_vocab_type}); // token_type_embeddings
- model.tok_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}); // LayerNorm
- model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}); // LayerNorm bias
- model.cls = ml.create_tensor(ctx_output, tn(LLM_TENSOR_CLS, "weight"), {n_embd, 1}, llama_model_loader::TENSOR_NOT_REQUIRED);
- model.cls_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_CLS, "bias"), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i]; // JinaBertLayer
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
- layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.attn_q_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
- layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.attn_k_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); // output_dense
- layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}); // output_dense
- layer.attn_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}); // output_norm
- layer.attn_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i), {n_embd});
- layer.attn_norm_2 = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.attn_norm_2_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
- layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
- layer.layer_out_norm = ml.create_tensor(ctx_split, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
- layer.layer_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd});
- }
- } break;
- case LLM_ARCH_BLOOM:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- model.tok_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd});
- model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
- layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
- layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
- layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
- }
- } break;
- case LLM_ARCH_MPT:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- if (!model.output) {
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // needs to be on GPU
- }
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
- layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
- layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.attn_q_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.attn_k_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // AWQ ScaleActivation layer
- layer.ffn_act = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
- }
- } break;
- case LLM_ARCH_STABLELM:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- // optional bias tensors, present in Stable LM 2 1.6B
- layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // optional q and k layernorms, present in StableLM 2 12B
- layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // optional FFN norm, not present in StableLM 2 12B which uses parallel residual
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- case LLM_ARCH_QWEN:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd*3});
- layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd*3});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff/2});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff/2, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff/2});
- }
- } break;
- case LLM_ARCH_QWEN2:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // if output is NULL, init from the input tok embed
- if (model.output == NULL) {
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
- }
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- // optional bias tensors
- layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
- layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
- layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- case LLM_ARCH_QWEN2MOE:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- // optional bias tensors
- layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
- layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
- layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});
- GGML_ASSERT(n_expert > 0);
- GGML_ASSERT(n_expert_used > 0);
- // MoE branch
- const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
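- // if the GGUF does not carry an explicit per-expert FFN size, fall back to splitting n_ff across the experts used per token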
- layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert});
- layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert});
- layer.ffn_up_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert});
- // Shared expert branch
- const int64_t n_ff_shexp = hparams.n_ff_shexp ? hparams.n_ff_shexp : n_ff;
- layer.ffn_gate_inp_shexp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP_SHEXP, "weight", i), {n_embd});
- layer.ffn_gate_shexp = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), { n_embd, n_ff_shexp});
- layer.ffn_down_shexp = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd});
- layer.ffn_up_shexp = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), { n_embd, n_ff_shexp});
- }
- } break;
- case LLM_ARCH_PHI2:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- model.output_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
- layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
- if (layer.wqkv == nullptr) {
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
- }
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
- layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
- }
- } break;
- case LLM_ARCH_PHI3:
- {
- const int64_t n_embd_head = n_embd / n_head;
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab });
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd });
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab });
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd });
- layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd });
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd });
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd });
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, 2 * n_ff });
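- // the up projection stores the gate and up halves fused into one tensor, hence the 2*n_ff output dimension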
- layer.rope_long = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight"), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
- layer.rope_short = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight"), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
- }
- } break;
- case LLM_ARCH_PLAMO:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- case LLM_ARCH_GPT2:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
- layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
- layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
- layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
- }
- } break;
- case LLM_ARCH_CODESHELL:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
- layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
- layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
- layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
- }
- } break;
- case LLM_ARCH_ORION:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- case LLM_ARCH_INTERNLM2:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- // layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- case LLM_ARCH_GEMMA:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- }
- } break;
- case LLM_ARCH_GEMMA2:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd});
- layer.attn_post_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_post_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd});
- }
- } break;
- case LLM_ARCH_STARCODER2:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // if output is NULL, init from the input tok embed
- if (model.output == NULL) {
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
- }
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- // optional bias tensors
- layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd});
- layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa});
- layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa});
- layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- // optional bias tensors
- layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
- layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP , "bias", i), { n_ff});
- }
- } break;
- case LLM_ARCH_MAMBA:
- {
- const int64_t d_conv = hparams.ssm_d_conv;
- const int64_t d_inner = hparams.ssm_d_inner;
- const int64_t d_state = hparams.ssm_d_state;
- const int64_t dt_rank = hparams.ssm_dt_rank;
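- // SSM dimensions: d_conv = conv kernel width, d_inner = expanded inner width, d_state = recurrent state size, dt_rank = rank of the delta (dt) projection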
- // only an expansion factor of 2 is supported for now
- GGML_ASSERT(2 * n_embd == d_inner);
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // if output is NULL, init from the input tok embed, duplicated to allow offloading
- if (model.output == NULL) {
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
- }
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- // norm
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.ssm_in = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, 2*d_inner});
- layer.ssm_conv1d = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner});
- layer.ssm_conv1d_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner});
- layer.ssm_x = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_X, "weight", i), {d_inner, dt_rank + 2*d_state});
- layer.ssm_dt = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_DT, "weight", i), {dt_rank, d_inner});
- layer.ssm_dt_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_DT, "bias", i), {d_inner});
- // no "weight" suffix for these
- layer.ssm_a = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_A, i), {d_state, d_inner});
- layer.ssm_d = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_SSM_D, i), {d_inner});
- // out_proj
- layer.ssm_out = ml.create_tensor(ctx_split, tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd});
- }
- } break;
- case LLM_ARCH_XVERSE:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- case LLM_ARCH_COMMAND_R:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- // init output from the input tok embed
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- if (n_layer >= 64) {
- layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head});
- layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv});
- }
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- case LLM_ARCH_OLMO: // adapted from LLM_ARCH_LLAMA with norm params removed
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // if output is NULL, init from the input tok embed
- if (model.output == NULL) {
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
- }
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- case LLM_ARCH_OLMOE:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd});
- layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});
- GGML_ASSERT(n_expert > 0);
- GGML_ASSERT(n_expert_used > 0);
- // MoE branch
- layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert});
- layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert});
- layer.ffn_up_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert});
- }
- } break;
- case LLM_ARCH_OPENELM:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- // init output from the input tok embed
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
- }
- for (int i = 0; i < n_layer; ++i) {
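- // OpenELM uses per-layer head counts and FFN widths; Q, K and V are loaded as a single fused QKV projection below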
- const int64_t n_head = hparams.n_head(i);
- const int64_t n_head_qkv = 2*hparams.n_head_kv(i) + n_head;
- const int64_t n_ff = hparams.n_ff(i);
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_head_qkv*n_embd_head_k});
- layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k});
- layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head*n_embd_head_k, n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- case LLM_ARCH_GPTNEOX:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
- layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
- layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
- layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
- }
- } break;
- case LLM_ARCH_ARCTIC:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // if output is NULL, init from the input tok embed
- if (model.output == NULL) {
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
- }
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_embd});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_embd, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_embd});
- layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});
- layer.ffn_norm_exps = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM_EXPS, "weight", i), {n_embd});
- layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, false);
- layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert});
- layer.ffn_up_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert});
- }
- } break;
- case LLM_ARCH_DEEPSEEK2:
- {
- const bool is_lite = (hparams.n_layer == 27);
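- // the 27-layer "lite" variant skips the low-rank Q projection (ATTN_Q_A/ATTN_Q_B) and loads a plain Q weight instead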
- const int64_t n_embd_head_qk_rope = hparams.n_rot;
- const int64_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
- const int64_t q_lora_rank = hparams.n_lora_q;
- const int64_t kv_lora_rank = hparams.n_lora_kv;
- const int64_t n_ff_exp = hparams.n_ff_exp;
- const int64_t n_expert_shared = hparams.n_expert_shared;
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- if (!is_lite) {
- layer.attn_q_a_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank});
- }
- layer.attn_kv_a_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank});
- if (!is_lite) {
- layer.wq_a = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank});
- layer.wq_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, n_head * n_embd_head_k});
- } else {
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa});
- }
- layer.wkv_a_mqa = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + (n_embd_head_qk_rope)});
- layer.wkv_b = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_KV_B, "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_head * ( n_embd_head_v), n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- if (i < (int) hparams.n_layer_dense_lead) {
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- } else {
- layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert});
- GGML_ASSERT(n_expert > 0);
- GGML_ASSERT(n_expert_used > 0);
- // MoE branch
- layer.ffn_gate_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert});
- layer.ffn_down_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert});
- layer.ffn_up_exps = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert});
- // Shared expert branch
- layer.ffn_gate_shexp = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared});
- layer.ffn_down_shexp = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff_exp * n_expert_shared, n_embd});
- layer.ffn_up_shexp = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared});
- }
- }
- } break;
- case LLM_ARCH_BITNET:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_sub_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_SUB_NORM, "weight", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wq_scale = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "scale", i), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wk_scale = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "scale", i), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wv_scale = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "scale", i), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.wo_scale = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "scale", i), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_sub_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_SUB_NORM, "weight", i), {n_ff});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_gate_scale = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE, "scale", i), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
- layer.ffn_down_scale = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "scale", i), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.ffn_up_scale = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "scale", i), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
- }
- } break;
- case LLM_ARCH_T5:
- {
- const auto n_rel_attn_bkts = hparams.n_rel_attn_bkts;
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm_enc = ml.create_tensor(ctx_output, tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd});
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_DEC_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // if output is NULL, init from the input tok embed
- if (model.output == NULL) {
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
- }
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm_enc = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ENC_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_rel_b_enc = ml.create_tensor(ctx_input, tn(LLM_TENSOR_ENC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.wq_enc = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ENC_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa});
- layer.wk_enc = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ENC_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
- layer.wv_enc = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ENC_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
- layer.wo_enc = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ENC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd});
- layer.ffn_norm_enc = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ENC_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate_enc = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd, n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_down_enc = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up_enc = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ENC_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_DEC_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_rel_b = ml.create_tensor(ctx_input, tn(LLM_TENSOR_DEC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_DEC_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_DEC_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_DEC_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_DEC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd});
- layer.attn_norm_cross = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_DEC_CROSS_ATTN_NORM, "weight", i), {n_embd});
- // this tensor seems to be unused in the HF transformers implementation
- layer.attn_rel_b_cross = ml.create_tensor(ctx_input, tn(LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.wq_cross = ml.create_tensor(ctx_split, tn(LLM_TENSOR_DEC_CROSS_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa});
- layer.wk_cross = ml.create_tensor(ctx_split, tn(LLM_TENSOR_DEC_CROSS_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
- layer.wv_cross = ml.create_tensor(ctx_split, tn(LLM_TENSOR_DEC_CROSS_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
- layer.wo_cross = ml.create_tensor(ctx_split, tn(LLM_TENSOR_DEC_CROSS_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_DEC_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_DEC_FFN_GATE, "weight", i), {n_embd, n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_DEC_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_DEC_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- case LLM_ARCH_T5ENCODER:
- {
- const auto n_rel_attn_bkts = hparams.n_rel_attn_bkts;
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm_enc = ml.create_tensor(ctx_output, tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // if output is NULL, init from the input tok embed
- if (model.output == NULL) {
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
- }
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm_enc = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ENC_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_rel_b_enc = ml.create_tensor(ctx_input, tn(LLM_TENSOR_ENC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.wq_enc = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ENC_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa});
- layer.wk_enc = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ENC_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
- layer.wv_enc = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ENC_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
- layer.wo_enc = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ENC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd});
- layer.ffn_norm_enc = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ENC_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate_enc = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd, n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_down_enc = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up_enc = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ENC_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- case LLM_ARCH_JAIS:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // Output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
- layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
- layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
- layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_gate_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff});
- }
- } break;
- case LLM_ARCH_CHATGLM:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
- layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff * 2});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
- }
- } break;
- case LLM_ARCH_NEMOTRON:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- // optional bias tensors
- layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- // optional MLP bias
- layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
- }
- } break;
- case LLM_ARCH_EXAONE:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.rope_freqs = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ROPE_FREQS, "weight"), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- case LLM_ARCH_RWKV6:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // Block 0, LN0
- model.tok_norm = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd});
- model.tok_norm_b = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd});
- // output
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
- const int time_mix_extra_dim = hparams.time_mix_extra_dim;
- const int time_decay_extra_dim = hparams.time_decay_extra_dim;
- const int head_size = hparams.wkv_head_size;
- const int attn_hidden_size = n_embd;
- const int ffn_size = hparams.n_ff_arr[0];
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd});
- layer.attn_norm_2 = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd});
- layer.attn_norm_2_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd});
- layer.time_mix_w1 = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, time_mix_extra_dim * 5});
- layer.time_mix_w2 = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {time_mix_extra_dim, n_embd, 5});
- layer.time_mix_lerp_x = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_LERP_X, "weight", i), {n_embd, 1, 1});
- layer.time_mix_lerp_w = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_LERP_W, "weight", i), {n_embd, 1, 1});
- layer.time_mix_lerp_k = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_LERP_K, "weight", i), {n_embd, 1, 1});
- layer.time_mix_lerp_v = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_LERP_V, "weight", i), {n_embd, 1, 1});
- layer.time_mix_lerp_r = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_LERP_R, "weight", i), {n_embd, 1, 1});
- layer.time_mix_lerp_g = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_LERP_G, "weight", i), {n_embd, 1, 1});
- layer.time_mix_first = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_FIRST, "weight", i), {head_size, n_embd / head_size});
- layer.time_mix_decay = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_DECAY, "weight", i), {n_embd});
- layer.time_mix_decay_w1 = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_DECAY_W1, "weight", i), {n_embd, time_decay_extra_dim});
- layer.time_mix_decay_w2 = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_DECAY_W2, "weight", i), {time_decay_extra_dim, attn_hidden_size});
- layer.time_mix_key = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd});
- layer.time_mix_value = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd});
- layer.time_mix_receptance = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd});
- layer.time_mix_gate = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_GATE, "weight", i), {attn_hidden_size, n_embd});
- layer.time_mix_ln = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd});
- layer.time_mix_ln_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd});
- layer.time_mix_output = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size});
- layer.channel_mix_lerp_k = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_CHANNEL_MIX_LERP_K, "weight", i), {n_embd, 1, 1});
- layer.channel_mix_lerp_r = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_CHANNEL_MIX_LERP_R, "weight", i), {n_embd, 1, 1});
- layer.channel_mix_key = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_CHANNEL_MIX_KEY, "weight", i), {n_embd, ffn_size});
- layer.channel_mix_value = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_CHANNEL_MIX_VALUE, "weight", i), {ffn_size, n_embd});
- layer.channel_mix_receptance = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "weight", i), {n_embd, n_embd});
- }
- } break;
- case LLM_ARCH_CHAMELEON:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- // if output is NULL, init from the input tok embed
- if (model.output == NULL) {
- model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
- }
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head});
- layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv});
- layer.attn_q_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {n_embd_head_k, n_head}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.attn_k_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {n_embd_head_k, n_head_kv}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- case LLM_ARCH_SOLAR:
- {
- model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
- // output
- {
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
- }
- for (int i = 0; i < n_layer; ++i) {
- ggml_context * ctx_layer = ctx_for_layer(i);
- ggml_context * ctx_split = ctx_for_layer_split(i);
- auto & layer = model.layers[i];
- layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
- layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head});
- layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
- layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
- layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd});
- layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
- layer.bskcn_tv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_BSKCN_TV, "weight"), {2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
- layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
- layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
- layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
- }
- } break;
- default:
- throw std::runtime_error("unknown architecture");
- }
- }
- ml.done_getting_tensors();
- ml.init_mappings(true, use_mlock ? &model.mlock_mmaps : nullptr);
- model.mappings.reserve(ml.mappings.size());
- // create the backend buffers
- std::vector<std::pair<ggml_context *, llama_buf_map>> ctx_bufs;
- ctx_bufs.reserve(ctx_map.size());
- // Ensure we have enough capacity for the maximum number of backend buffers we may create
- size_t n_max_backend_buffer = ctx_map.size() * ml.files.size();
- model.bufs.reserve(n_max_backend_buffer);
- for (auto & it : ctx_map) {
- ggml_backend_buffer_type_t buft = it.first;
- ggml_context * ctx = it.second;
- llama_buf_map bufs;
- bufs.reserve(n_max_backend_buffer);
- // only the mmap region containing the tensors in the model is mapped to the backend buffer
- // this is important for Metal with Apple silicon: if the entire model could be mapped to a Metal buffer, then we could just use Metal for all layers
- // this allows using partial offloading when the model size exceeds the metal buffer size, but not the RAM size
- if (ml.use_mmap && use_mmap_buffer && buft == llama_default_buffer_type_cpu(true)) {
- for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
- void * addr = nullptr;
- size_t first, last;
- ml.get_mapping_range(&first, &last, &addr, idx, ctx);
- if (first >= last) {
- continue;
- }
- ggml_backend_buffer_t buf = ggml_backend_cpu_buffer_from_ptr((char *) addr + first, last - first);
- if (buf == nullptr) {
- throw std::runtime_error("unable to allocate backend CPU buffer");
- }
- model.bufs.push_back(buf);
- bufs.emplace(idx, buf);
- #ifdef GGML_USE_CUDA
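- // when not all layers are offloaded, register the mmap-backed host buffer with the CUDA backend (pinned memory) to speed up copies of the CPU-resident weights to the GPU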
- if (n_layer >= n_gpu_layers) {
- ggml_backend_cuda_register_host_buffer(
- ggml_backend_buffer_get_base(buf),
- ggml_backend_buffer_get_size(buf));
- }
- #endif
- }
- }
- #ifdef GGML_USE_METAL
- else if (ml.use_mmap && use_mmap_buffer && buft == ggml_backend_metal_buffer_type()) {
- for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
- const size_t max_size = ggml_get_max_tensor_size(ctx);
- void * addr = nullptr;
- size_t first, last;
- ml.get_mapping_range(&first, &last, &addr, idx, ctx);
- if (first >= last) {
- continue;
- }
- ggml_backend_buffer_t buf = ggml_backend_metal_buffer_from_ptr((char *) addr + first, last - first, max_size);
- if (buf == nullptr) {
- throw std::runtime_error("unable to allocate backend metal buffer");
- }
- model.bufs.push_back(buf);
- bufs.emplace(idx, buf);
- }
- }
- #endif
- else {
- ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
- if (buf == nullptr) {
- throw std::runtime_error("unable to allocate backend buffer");
- }
- model.bufs.push_back(buf);
- if (use_mlock && ggml_backend_buffer_is_host(buf)) {
- model.mlock_bufs.emplace_back(new llama_mlock);
- auto & mlock_buf = model.mlock_bufs.back();
- mlock_buf->init (ggml_backend_buffer_get_base(buf));
- mlock_buf->grow_to(ggml_backend_buffer_get_size(buf));
- }
- for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
- bufs.emplace(idx, buf);
- }
- }
- if (bufs.empty()) {
- throw std::runtime_error("failed to allocate buffer");
- }
- for (auto & buf : bufs) {
- // indicate that this buffer contains weights
- // this is used by ggml_backend_sched to improve op scheduling -> ops that use a weight are preferably scheduled to the backend that contains the weight
- ggml_backend_buffer_set_usage(buf.second, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
- }
- ctx_bufs.emplace_back(ctx, bufs);
- }
- if (llama_supports_gpu_offload()) {
- const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
- LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
- if (n_gpu_layers > (int) hparams.n_layer) {
- LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
- }
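- // the + 1 accounts for the non-repeating output layer on top of the n_layer repeating blocks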
- const int max_backend_supported_layers = hparams.n_layer + 1;
- const int max_offloadable_layers = hparams.n_layer + 1;
- LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
- }
- // print memory requirements
- for (ggml_backend_buffer_t buf : model.bufs) {
- LLAMA_LOG_INFO("%s: %10s buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0);
- }
- // populate tensors_by_name
- for (ggml_context * ctx : model.ctxs) {
- for (auto * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) {
- model.tensors_by_name.emplace_back(ggml_get_name(cur), cur);
- }
- }
- // load tensor data
- for (auto & it : ctx_bufs) {
- ggml_context * ctx = it.first;
- auto & bufs = it.second;
- if (!ml.load_all_data(ctx, bufs, use_mlock ? &model.mlock_mmaps : NULL, progress_callback, progress_callback_user_data)) {
- return false;
- }
- }
- if (use_mmap_buffer) {
- for (auto & mapping : ml.mappings) {
- model.mappings.emplace_back(std::move(mapping));
- }
- }
- return true;
- }
- // Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
- static int llama_model_load(const std::string & fname, llama_model & model, llama_model_params & params) {
- model.t_start_us = ggml_time_us();
- try {
- llama_model_loader ml(fname, params.use_mmap, params.check_tensors, params.kv_overrides);
- model.hparams.vocab_only = params.vocab_only;
- try {
- llm_load_arch(ml, model);
- } catch(const std::exception & e) {
- throw std::runtime_error("error loading model architecture: " + std::string(e.what()));
- }
- try {
- llm_load_hparams(ml, model);
- } catch(const std::exception & e) {
- throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what()));
- }
- try {
- llm_load_vocab(ml, model);
- } catch(const std::exception & e) {
- throw std::runtime_error("error loading model vocabulary: " + std::string(e.what()));
- }
- llm_load_print_meta(ml, model);
- if (model.vocab.type != LLAMA_VOCAB_TYPE_NONE &&
- model.hparams.n_vocab != model.vocab.id_to_token.size()) {
- LLAMA_LOG_WARN("%s: vocab mismatch %u != %zu ...\n", __func__, model.hparams.n_vocab, model.vocab.id_to_token.size());
- }
- if (params.vocab_only) {
- LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
- return 0;
- }
- #ifdef GGML_USE_KOMPUTE
- if (params.n_gpu_layers > 0 && (
- !(model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON)
- || !(
- model.ftype == LLAMA_FTYPE_ALL_F32 ||
- model.ftype == LLAMA_FTYPE_MOSTLY_F16 ||
- model.ftype == LLAMA_FTYPE_MOSTLY_BF16 ||
- model.ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ||
- model.ftype == LLAMA_FTYPE_MOSTLY_Q4_1
- )
- )) {
- // TODO(cebtenzzre): propagate this error outside of llama_load_model_from_file
- LLAMA_LOG_WARN("%s: disabling Kompute due to unsupported model arch or quantization\n", __func__);
- params.n_gpu_layers = 0;
- }
- #endif
- if (!llm_load_tensors(
- ml, model, params.n_gpu_layers, params.split_mode, params.main_gpu, params.tensor_split, params.use_mlock,
- params.progress_callback, params.progress_callback_user_data
- )) {
- return -2;
- }
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
- return -1;
- }
- // loading time will be recalculated after the first eval, so
- // we take page faults deferred by mmap() into consideration
- model.t_load_us = ggml_time_us() - model.t_start_us;
- return 0;
- }
- //
- // llm_build
- //
- using llm_build_cb = std::function<void(struct ggml_tensor * cur, const char * name, int nl)>;
- enum llm_ffn_op_type {
- LLM_FFN_SILU,
- LLM_FFN_GELU,
- LLM_FFN_RELU,
- LLM_FFN_RELU_SQR,
- LLM_FFN_SWIGLU,
- };
- enum llm_ffn_gate_type {
- LLM_FFN_SEQ,
- LLM_FFN_PAR, // ffn_gate is parallel to ffn_up
- };
- enum llm_norm_type {
- LLM_NORM,
- LLM_NORM_RMS,
- };
- static struct ggml_tensor * llm_build_inp_embd(
- struct ggml_context * ctx,
- struct llama_context & lctx,
- const llama_hparams & hparams,
- const llama_ubatch & batch,
- struct ggml_tensor * tok_embd,
- const llm_build_cb & cb) {
- const int64_t n_embd = hparams.n_embd;
- struct ggml_tensor * inpL;
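- // either look up embedding rows by token id, or take pre-computed embeddings directly from the batch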
- if (batch.token) {
- lctx.inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, batch.n_tokens);
- cb(lctx.inp_tokens, "inp_tokens", -1);
- ggml_set_input(lctx.inp_tokens);
- inpL = ggml_get_rows(ctx, tok_embd, lctx.inp_tokens);
- } else {
- lctx.inp_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens);
- inpL = lctx.inp_embd;
- ggml_set_input(lctx.inp_embd);
- }
- // For the Granite architecture
- if (hparams.f_embedding_scale != 0.0f) {
- inpL = ggml_scale(ctx, inpL, hparams.f_embedding_scale);
- }
- cb(inpL, "inp_embd", -1);
- return inpL;
- }
- static struct ggml_tensor * llm_build_inp_cross_attn_state(
- struct ggml_context * ctx,
- struct llama_context & lctx,
- const llama_hparams & hparams,
- const llm_build_cb & cb) {
- const int64_t n_embd = hparams.n_embd;
- struct ggml_tensor * inpCAS = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_embd, 1601, 4);
- cb(inpCAS, "inp_cross_attn_state", -1);
- ggml_set_input(inpCAS);
- lctx.inp_cross_attn_state = inpCAS;
- return inpCAS;
- }
- static void llm_build_kv_store(
- struct ggml_context * ctx,
- const llama_hparams & hparams,
- const llama_cparams & cparams,
- const llama_kv_cache & kv,
- struct ggml_cgraph * graph,
- struct ggml_tensor * k_cur,
- struct ggml_tensor * v_cur,
- int32_t n_tokens,
- int32_t kv_head,
- const llm_build_cb & cb,
- int64_t il) {
- const int64_t n_ctx = cparams.n_ctx;
- const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
- const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
- GGML_ASSERT(kv.size == n_ctx);
- struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k_l[il], n_tokens*n_embd_k_gqa, ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa)*kv_head);
- cb(k_cache_view, "k_cache_view", il);
- // note: storing RoPE-ed version of K in the KV cache
- ggml_build_forward_expand(graph, ggml_cpy(ctx, k_cur, k_cache_view));
- assert(v_cur->ne[0] == n_embd_v_gqa && v_cur->ne[1] == n_tokens);
- struct ggml_tensor * v_cache_view = nullptr;
- if (cparams.flash_attn) {
- v_cache_view = ggml_view_1d(ctx, kv.v_l[il], n_tokens*n_embd_v_gqa, ggml_row_size(kv.v_l[il]->type, n_embd_v_gqa)*kv_head);
- } else {
- // note: the V cache is transposed when not using flash attention
- v_cache_view = ggml_view_2d(ctx, kv.v_l[il], n_tokens, n_embd_v_gqa,
- ( n_ctx)*ggml_element_size(kv.v_l[il]),
- (kv_head)*ggml_element_size(kv.v_l[il]));
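- // the transposed layout lets the later attention matmul consume the V cache without a runtime transpose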
- v_cur = ggml_transpose(ctx, v_cur);
- }
- cb(v_cache_view, "v_cache_view", il);
- ggml_build_forward_expand(graph, ggml_cpy(ctx, v_cur, v_cache_view));
- }
- // do mat_mul, optionally applying LoRA
- static struct ggml_tensor * llm_build_lora_mm(
- struct llama_context & lctx,
- struct ggml_context * ctx0,
- struct ggml_tensor * w,
- struct ggml_tensor * cur) {
- struct ggml_tensor * res = ggml_mul_mat(ctx0, w, cur);
- for (auto & it : lctx.lora_adapters) {
- struct llama_lora_weight * lora = it.first->get_weight(w);
- if (lora == nullptr) {
- continue;
- }
- const float alpha = it.first->alpha;
- const float rank = (float) lora->b->ne[0];
- const float scale = alpha ? it.second * alpha / rank : it.second;
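- // LoRA contribution: res += scale * B(A*cur), where scale folds the per-adapter scale (it.second) with alpha/rank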
- struct ggml_tensor * ab_cur = ggml_mul_mat(
- ctx0, lora->b,
- ggml_mul_mat(ctx0, lora->a, cur)
- );
- ab_cur = ggml_scale(ctx0, ab_cur, scale);
- res = ggml_add(ctx0, res, ab_cur);
- }
- return res;
- }
- // do mat_mul_id, optionally applying LoRA
- static struct ggml_tensor * llm_build_lora_mm_id(
- struct llama_context & lctx,
- struct ggml_context * ctx0,
- struct ggml_tensor * w, // struct ggml_tensor * as
- struct ggml_tensor * cur, // struct ggml_tensor * b
- struct ggml_tensor * ids) {
- struct ggml_tensor * res = ggml_mul_mat_id(ctx0, w, cur, ids);
- for (auto & it : lctx.lora_adapters) {
- struct llama_lora_weight * lora = it.first->get_weight(w);
- if (lora == nullptr) {
- continue;
- }
- const float alpha = it.first->alpha;
- const float rank = (float) lora->b->ne[0];
- const float scale = alpha ? it.second * alpha / rank : it.second;
- struct ggml_tensor * ab_cur = ggml_mul_mat_id(
- ctx0, lora->b,
- ggml_mul_mat_id(ctx0, lora->a, cur, ids),
- ids
- );
- ab_cur = ggml_scale(ctx0, ab_cur, scale);
- res = ggml_add(ctx0, res, ab_cur);
- }
- return res;
- }
- static struct ggml_tensor * llm_build_norm(
- struct ggml_context * ctx,
- struct ggml_tensor * cur,
- const llama_hparams & hparams,
- struct ggml_tensor * mw,
- struct ggml_tensor * mb,
- llm_norm_type type,
- const llm_build_cb & cb,
- int il) {
- switch (type) {
- case LLM_NORM: cur = ggml_norm (ctx, cur, hparams.f_norm_eps); break;
- case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hparams.f_norm_rms_eps); break;
- }
- if (mw || mb) {
- cb(cur, "norm", il);
- }
- if (mw) {
- cur = ggml_mul(ctx, cur, mw);
- if (mb) {
- cb(cur, "norm_w", il);
- }
- }
- if (mb) {
- cur = ggml_add(ctx, cur, mb);
- }
- return cur;
- }
- static struct ggml_tensor * llm_build_ffn(
- struct ggml_context * ctx,
- struct llama_context & lctx,
- struct ggml_tensor * cur,
- struct ggml_tensor * up,
- struct ggml_tensor * up_b,
- struct ggml_tensor * up_s,
- struct ggml_tensor * gate,
- struct ggml_tensor * gate_b,
- struct ggml_tensor * gate_s,
- struct ggml_tensor * down,
- struct ggml_tensor * down_b,
- struct ggml_tensor * down_s,
- struct ggml_tensor * act_scales,
- llm_ffn_op_type type_op,
- llm_ffn_gate_type type_gate,
- const llm_build_cb & cb,
- int il) {
- struct ggml_tensor * tmp = up ? llm_build_lora_mm(lctx, ctx, up, cur) : cur;
- cb(tmp, "ffn_up", il);
- if (up_b) {
- tmp = ggml_add(ctx, tmp, up_b);
- cb(tmp, "ffn_up_b", il);
- }
- if (up_s) {
- tmp = ggml_mul(ctx, tmp, up_s);
- cb(tmp, "ffn_up_s", il);
- }
- if (gate) {
- switch (type_gate) {
- case LLM_FFN_SEQ:
- {
- cur = llm_build_lora_mm(lctx, ctx, gate, tmp);
- cb(cur, "ffn_gate", il);
- } break;
- case LLM_FFN_PAR:
- {
- cur = llm_build_lora_mm(lctx, ctx, gate, cur);
- cb(cur, "ffn_gate", il);
- } break;
- }
- if (gate_b) {
- cur = ggml_add(ctx, cur, gate_b);
- cb(cur, "ffn_gate_b", il);
- }
- if (gate_s) {
- cur = ggml_mul(ctx, cur, gate_s);
- cb(cur, "ffn_gate_s", il);
- }
- } else {
- cur = tmp;
- }
- switch (type_op) {
- case LLM_FFN_SILU:
- {
- cur = ggml_silu(ctx, cur);
- cb(cur, "ffn_silu", il);
- } break;
- case LLM_FFN_GELU:
- {
- cur = ggml_gelu(ctx, cur);
- cb(cur, "ffn_gelu", il);
- if (act_scales != NULL) {
- cur = ggml_div(ctx, cur, act_scales);
- cb(cur, "ffn_act", il);
- }
- } break;
- case LLM_FFN_RELU:
- {
- cur = ggml_relu(ctx, cur);
- cb(cur, "ffn_relu", il);
- } break;
- case LLM_FFN_RELU_SQR:
- {
- cur = ggml_relu(ctx, cur);
- cb(cur, "ffn_relu", il);
- cur = ggml_sqr(ctx, cur);
- cb(cur, "ffn_sqr(relu)", il);
- } break;
- case LLM_FFN_SWIGLU:
- {
- // Project to 4h. When using SwiGLU the up projection's output width is doubled, see https://arxiv.org/pdf/2002.05202.pdf
- int64_t split_point = cur->ne[0] / 2;
- struct ggml_tensor * x0 = ggml_cont(ctx, ggml_view_2d(ctx, cur, split_point, cur->ne[1], cur->nb[1], 0));
- struct ggml_tensor * x1 = ggml_cont(ctx, ggml_view_2d(ctx, cur, split_point, cur->ne[1], cur->nb[1], split_point * ggml_element_size(cur)));
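- // SwiGLU: the result is SiLU(x0) * x1, where x0 and x1 are the two halves of the up projection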
- x0 = ggml_silu(ctx, x0);
- cb(cur, "ffn_silu", il);
- cur = ggml_mul(ctx, x0, x1);
- cb(cur, "ffn_mul", il);
- } break;
- }
- if (type_gate == LLM_FFN_PAR) {
- cur = ggml_mul(ctx, cur, tmp);
- cb(cur, "ffn_gate_par", il);
- }
- if (down) {
- cur = llm_build_lora_mm(lctx, ctx, down, cur);
- }
- if (down_b) {
- cb(cur, "ffn_down", il);
- cur = ggml_add(ctx, cur, down_b);
- }
- if (down_s) {
- cur = ggml_mul(ctx, cur, down_s);
- cb(cur, "ffn_down_s", il);
- }
- return cur;
- }
- static struct ggml_tensor * llm_build_moe_ffn(
- struct ggml_context * ctx,
- struct llama_context & lctx,
- struct ggml_tensor * cur,
- struct ggml_tensor * gate_inp,
- struct ggml_tensor * up_exps,
- struct ggml_tensor * gate_exps,
- struct ggml_tensor * down_exps,
- int64_t n_expert,
- int64_t n_expert_used,
- llm_ffn_op_type type_op,
- bool norm_w,
- bool scale_w,
- float w_scale,
- const llm_build_cb & cb,
- int il) {
- int64_t n_embd = cur->ne[0];
- int64_t n_tokens = cur->ne[1];
- ggml_tensor * logits = llm_build_lora_mm(lctx, ctx, gate_inp, cur); // [n_expert, n_tokens]
- cb(logits, "ffn_moe_logits", il);
- ggml_tensor * probs = ggml_soft_max(ctx, logits); // [n_expert, n_tokens]
- cb(probs, "ffn_moe_probs", il);
- // select experts
- ggml_tensor * selected_experts = ggml_top_k(ctx, probs, n_expert_used); // [n_expert_used, n_tokens]
- cb(selected_experts->src[0], "ffn_moe_argsort", il);
- cb(selected_experts, "ffn_moe_topk", il);
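- // gather the routing probability of each selected expert: weights[e, t] = probs[selected_experts[e, t], t]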
- ggml_tensor * weights = ggml_get_rows(ctx,
- ggml_reshape_3d(ctx, probs, 1, n_expert, n_tokens), selected_experts); // [1, n_expert_used, n_tokens]
- cb(weights, "ffn_moe_weights", il);
- if (norm_w) {
- weights = ggml_reshape_2d(ctx, weights, n_expert_used, n_tokens);
- ggml_tensor * weights_sum = ggml_sum_rows(ctx, weights); // [1, n_tokens]
- cb(weights_sum, "ffn_moe_weights_sum", il);
- weights = ggml_div(ctx, weights, weights_sum); // [n_expert_used, n_tokens]
- cb(weights, "ffn_moe_weights_norm", il);
- weights = ggml_reshape_3d(ctx, weights, 1, n_expert_used, n_tokens);
- }
- if (scale_w) {
- weights = ggml_scale(ctx, weights, w_scale);
- cb(weights, "ffn_moe_weights_scaled", il);
- }
- cur = ggml_reshape_3d(ctx, cur, n_embd, 1, n_tokens);
- ggml_tensor * up = llm_build_lora_mm_id(lctx, ctx, up_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
- cb(up, "ffn_moe_up", il);
- ggml_tensor * gate = llm_build_lora_mm_id(lctx, ctx, gate_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
- cb(gate, "ffn_moe_gate", il);
- switch (type_op) {
- case LLM_FFN_SILU:
- {
- gate = ggml_silu(ctx, gate);
- cb(gate, "ffn_moe_silu", il);
- } break;
- case LLM_FFN_GELU:
- {
- gate = ggml_gelu(ctx, gate);
- cb(gate, "ffn_moe_gelu", il);
- } break;
- default:
- GGML_ABORT("fatal error");
- }
- ggml_tensor * par = ggml_mul(ctx, up, gate); // [n_ff, n_expert_used, n_tokens]
- cb(par, "ffn_moe_gate_par", il);
- ggml_tensor * experts = llm_build_lora_mm_id(lctx, ctx, down_exps, par, selected_experts); // [n_embd, n_expert_used, n_tokens]
- cb(experts, "ffn_moe_down", il);
- experts = ggml_mul(ctx, experts, weights);
- // aggregate experts
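- // the expert outputs are already weighted by their routing probabilities; summing over the n_expert_used dimension gives the final MoE output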
- ggml_tensor * moe_out = nullptr;
- for (int i = 0; i < n_expert_used; ++i) {
- ggml_tensor * cur_expert = ggml_view_2d(ctx, experts, n_embd, n_tokens,
- experts->nb[2], i*experts->nb[1]);
- if (i == 0) {
- moe_out = cur_expert;
- } else {
- moe_out = ggml_add(ctx, moe_out, cur_expert);
- }
- }
- if (n_expert_used == 1) {
- // avoid returning a non-contiguous tensor
- moe_out = ggml_cont(ctx, moe_out);
- }
- return moe_out;
- }
- static struct ggml_tensor * llm_build_kqv(
- struct ggml_context * ctx,
- struct llama_context & lctx,
- const llama_kv_cache & kv,
- struct ggml_cgraph * graph,
- struct ggml_tensor * wo,
- struct ggml_tensor * wo_b,
- struct ggml_tensor * q_cur,
- struct ggml_tensor * kq_mask,
- int32_t n_tokens,
- int32_t n_kv,
- float kq_scale,
- const llm_build_cb & cb,
- int il) {
- const llama_model & model = lctx.model;
- const llama_hparams & hparams = lctx.model.hparams;
- const llama_cparams & cparams = lctx.cparams;
- const int64_t n_ctx = cparams.n_ctx;
- const int64_t n_head = hparams.n_head(il);
- const int64_t n_head_kv = hparams.n_head_kv(il);
- const int64_t n_embd_head_k = hparams.n_embd_head_k;
- const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
- const int64_t n_embd_head_v = hparams.n_embd_head_v;
- const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
- struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3);
- cb(q, "q", il);
- struct ggml_tensor * k =
- ggml_view_3d(ctx, kv.k_l[il],
- n_embd_head_k, n_kv, n_head_kv,
- ggml_row_size(kv.k_l[il]->type, n_embd_k_gqa),
- ggml_row_size(kv.k_l[il]->type, n_embd_head_k),
- 0);
- cb(k, "k", il);
- struct ggml_tensor * cur;
- if (cparams.flash_attn) {
- GGML_UNUSED(model);
- GGML_UNUSED(n_ctx);
- // split cached v into n_head heads (not transposed)
- struct ggml_tensor * v =
- ggml_view_3d(ctx, kv.v_l[il],
- n_embd_head_v, n_kv, n_head_kv,
- ggml_row_size(kv.v_l[il]->type, n_embd_v_gqa),
- ggml_row_size(kv.v_l[il]->type, n_embd_head_v),
- 0);
- cb(v, "v", il);
- cur = ggml_flash_attn_ext(ctx, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias,
- hparams.attn_soft_cap ? hparams.f_attn_logit_softcapping : 0.0f);
- if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_GEMMA2) {
- ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32);
- }
- cur = ggml_reshape_2d(ctx, cur, n_embd_head_v*n_head, n_tokens);
- } else {
- struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
- cb(kq, "kq", il);
- if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2 || model.arch == LLM_ARCH_NEMOTRON || model.arch == LLM_ARCH_CHATGLM) {
- // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
- // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
- ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
- }
- if (model.arch == LLM_ARCH_GROK) {
- // Grok multiplies by attn_output_multiplier (0.08838834764831845) and then
- // soft-caps the logits with kq = 30 * tanh(kq / 30) before the softmax below;
- // both scales are folded into the single ggml_scale preceding the tanh.
- //ggml_mul_mat_set_prec(kq, GGML_PREC_F32); // F32 precision experiment carried over from phi2
- kq = ggml_tanh(ctx, ggml_scale(ctx, kq, 0.08838834764831845f/30.0f));
- kq = ggml_scale(ctx, kq, 30);
- }
- if (hparams.attn_soft_cap) {
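- // logit soft-capping: kq = f_attn_logit_softcapping * tanh(kq / f_attn_logit_softcapping)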
- kq = ggml_scale(ctx, kq, 1.0f / hparams.f_attn_logit_softcapping);
- kq = ggml_tanh(ctx, kq);
- kq = ggml_scale(ctx, kq, hparams.f_attn_logit_softcapping);
- }
- kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias);
- cb(kq, "kq_soft_max_ext", il);
- GGML_ASSERT(kv.size == n_ctx);
- // split cached v into n_head heads
- struct ggml_tensor * v =
- ggml_view_3d(ctx, kv.v_l[il],
- n_kv, n_embd_head_v, n_head_kv,
- ggml_element_size(kv.v_l[il])*n_ctx,
- ggml_element_size(kv.v_l[il])*n_ctx*n_embd_head_v,
- 0);
- cb(v, "v", il);
- struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq);
- cb(kqv, "kqv", il);
- struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3);
- cb(kqv_merged, "kqv_merged", il);
- cur = ggml_cont_2d(ctx, kqv_merged, n_embd_head_v*n_head, n_tokens);
- cb(cur, "kqv_merged_cont", il);
- }
- ggml_build_forward_expand(graph, cur);
- if (wo) {
- cur = llm_build_lora_mm(lctx, ctx, wo, cur);
- }
- if (wo_b) {
- cb(cur, "kqv_wo", il);
- cur = ggml_add(ctx, cur, wo_b);
- }
- return cur;
- }
- static struct ggml_tensor * llm_build_kv(
- struct ggml_context * ctx,
- struct llama_context & lctx,
- const llama_kv_cache & kv,
- struct ggml_cgraph * graph,
- struct ggml_tensor * wo,
- struct ggml_tensor * wo_b,
- struct ggml_tensor * k_cur,
- struct ggml_tensor * v_cur,
- struct ggml_tensor * q_cur,
- struct ggml_tensor * kq_mask,
- int32_t n_tokens,
- int32_t kv_head,
- int32_t n_kv,
- float kq_scale,
- const llm_build_cb & cb,
- int il) {
- const llama_hparams & hparams = lctx.model.hparams;
- const llama_cparams & cparams = lctx.cparams;
- // these nodes are added to the graph together so that they are not reordered
- // by doing so, the number of splits in the graph is reduced
- ggml_build_forward_expand(graph, q_cur);
- ggml_build_forward_expand(graph, k_cur);
- ggml_build_forward_expand(graph, v_cur);
- llm_build_kv_store(ctx, hparams, cparams, kv, graph, k_cur, v_cur, n_tokens, kv_head, cb, il);
- struct ggml_tensor * cur;
- cur = llm_build_kqv(ctx, lctx, kv, graph, wo, wo_b, q_cur, kq_mask, n_tokens, n_kv, kq_scale, cb, il);
- cb(cur, "kqv_out", il);
- return cur;
- }
- static struct ggml_tensor * llm_build_copy_mask_state(
- struct ggml_context * ctx,
- struct ggml_cgraph * graph,
- struct ggml_tensor * s,
- struct ggml_tensor * state_copy,
- struct ggml_tensor * state_mask,
- int32_t n_state,
- int32_t kv_size,
- int32_t kv_head,
- int32_t n_kv,
- int32_t n_seqs) {
- struct ggml_tensor * states = ggml_reshape_2d(ctx, s, n_state, kv_size);
- // copy states
- // NOTE: assuming the copy destinations are ALL contained between kv_head and kv_head + n_kv
- // this shrinks the tensor's ne[1] to n_kv
- states = ggml_get_rows(ctx, states, state_copy);
- // clear states of sequences which are starting at the beginning of this batch
- // FIXME: zero-out NANs?
- states = ggml_mul(ctx, states, state_mask);
- // copy states which won't be changed further (between n_seqs and n_kv)
- ggml_build_forward_expand(graph,
- ggml_cpy(ctx,
- ggml_view_1d(ctx, states, n_state*(n_kv - n_seqs), n_seqs*n_state*ggml_element_size(states)),
- ggml_view_1d(ctx, s, n_state*(n_kv - n_seqs), (kv_head + n_seqs)*n_state*ggml_element_size(s))));
- // the part of the states that will be used and modified
- return ggml_view_2d(ctx, states, n_state, n_seqs, states->nb[1], 0);
- }
- // TODO: split
- static struct ggml_tensor * llm_build_mamba(
- struct ggml_context * ctx,
- struct llama_context & lctx,
- const llama_ubatch & batch,
- struct ggml_cgraph * graph,
- struct ggml_tensor * cur,
- struct ggml_tensor * state_copy,
- struct ggml_tensor * state_mask,
- int32_t kv_head,
- int32_t n_kv,
- const llm_build_cb & cb,
- int il) {
- const llama_model & model = lctx.model;
- const llama_hparams & hparams = model.hparams;
- const llama_kv_cache & kv = lctx.kv_self;
- const int64_t d_conv = hparams.ssm_d_conv;
- const int64_t d_inner = hparams.ssm_d_inner;
- const int64_t d_state = hparams.ssm_d_state;
- const int64_t dt_rank = hparams.ssm_dt_rank;
- const int64_t n_seqs = batch.n_seqs;
- // Some Mamba variants (e.g. FalconMamba) apply RMS norm on the dt, B and C projections
- const bool ssm_dt_b_c_rms = hparams.ssm_dt_b_c_rms;
- // Use the same RMS norm as the final layer norm
- const float norm_rms_eps = hparams.f_norm_rms_eps;
- const int64_t n_seq_tokens = batch.n_seq_tokens;
- GGML_ASSERT(n_seqs != 0);
- GGML_ASSERT(batch.equal_seqs);
- GGML_ASSERT(batch.n_tokens == n_seq_tokens * n_seqs);
- struct ggml_tensor * conv_states_all = kv.k_l[il];
- struct ggml_tensor * ssm_states_all = kv.v_l[il];
- // (ab)using the KV cache to store the states
- struct ggml_tensor * conv = llm_build_copy_mask_state(ctx,
- graph, conv_states_all, state_copy, state_mask,
- hparams.n_embd_k_s(), kv.size, kv_head, n_kv, n_seqs);
- conv = ggml_reshape_3d(ctx, conv, d_conv - 1, d_inner, n_seqs);
- struct ggml_tensor * ssm = llm_build_copy_mask_state(ctx,
- graph, ssm_states_all, state_copy, state_mask,
- hparams.n_embd_v_s(), kv.size, kv_head, n_kv, n_seqs);
- ssm = ggml_reshape_3d(ctx, ssm, d_state, d_inner, n_seqs);
- // {n_embd, n_tokens} => {n_embd, n_seq_tokens, n_seqs}
- cur = ggml_reshape_3d(ctx, cur, cur->ne[0], n_seq_tokens, n_seqs);
- // {n_embd, 2*d_inner} @ {n_embd, n_seq_tokens, n_seqs} => {2*d_inner, n_seq_tokens, n_seqs}
- struct ggml_tensor * xz = llm_build_lora_mm(lctx, ctx, model.layers[il].ssm_in, cur);
- // split the above in two
- // => {d_inner, n_seq_tokens, n_seqs}
- struct ggml_tensor * x = ggml_view_3d(ctx, xz, d_inner, xz->ne[1], xz->ne[2], xz->nb[1], xz->nb[2], 0);
- struct ggml_tensor * z = ggml_view_3d(ctx, xz, d_inner, xz->ne[1], xz->ne[2], xz->nb[1], xz->nb[2], d_inner*ggml_element_size(xz));
- // conv
- {
- // => {d_conv - 1 + n_seq_tokens, d_inner, n_seqs}
- struct ggml_tensor * conv_x = ggml_concat(ctx, conv, ggml_transpose(ctx, x), 0);
- // copy last (d_conv - 1) columns back into the state cache
- struct ggml_tensor * last_conv = ggml_view_3d(ctx, conv_x, d_conv - 1, d_inner, n_seqs, conv_x->nb[1], conv_x->nb[2], n_seq_tokens*(conv_x->nb[0]));
- ggml_build_forward_expand(graph,
- ggml_cpy(ctx, last_conv,
- ggml_view_1d(ctx, conv_states_all,
- (d_conv - 1)*(d_inner)*(n_seqs),
- kv_head*(d_conv - 1)*(d_inner)*ggml_element_size(conv_states_all))));
- // 1D convolution
- // The equivalent is to make a self-overlapping view of conv_x
- // over d_conv columns at each stride in the 3rd dimension,
- // then element-wise multiply that with the conv1d weight,
- // then sum the elements of each row,
- // (the last two steps are a dot product over rows (also doable with mul_mat))
- // then permute away the ne[0] dimension,
- // and then you're left with the resulting x tensor.
- // For simultaneous sequences, all sequences need to have the same length.
- x = ggml_ssm_conv(ctx, conv_x, model.layers[il].ssm_conv1d);
- // bias
- x = ggml_add(ctx, x, model.layers[il].ssm_conv1d_b);
- x = ggml_silu(ctx, x);
- }
- // ssm
- {
- // {d_inner, dt_rank + 2*d_state} @ {d_inner, n_seq_tokens, n_seqs} => {dt_rank + 2*d_state, n_seq_tokens, n_seqs}
- struct ggml_tensor * x_db = llm_build_lora_mm(lctx, ctx, model.layers[il].ssm_x, x);
- // split
- struct ggml_tensor * dt = ggml_view_3d(ctx, x_db, dt_rank, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], 0);
- struct ggml_tensor * B = ggml_view_3d(ctx, x_db, d_state, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], ggml_element_size(x_db)*dt_rank);
- struct ggml_tensor * C = ggml_view_3d(ctx, x_db, d_state, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], ggml_element_size(x_db)*(dt_rank+d_state));
- // Some Mamba variants (e.g. FalconMamba) apply RMS norm in B, C & Dt layers
- if (ssm_dt_b_c_rms) {
- dt = ggml_rms_norm(ctx, dt, norm_rms_eps);
- B = ggml_rms_norm(ctx, B, norm_rms_eps);
- C = ggml_rms_norm(ctx, C, norm_rms_eps);
- }
- // {dt_rank, d_inner} @ {dt_rank, n_seq_tokens, n_seqs} => {d_inner, n_seq_tokens, n_seqs}
- dt = llm_build_lora_mm(lctx, ctx, model.layers[il].ssm_dt, dt);
- dt = ggml_add(ctx, dt, model.layers[il].ssm_dt_b);
- // Custom operator to optimize the parallel associative scan
- // as described in the Annex D of the Mamba paper.
- // => {d_inner, n_seq_tokens, n_seqs} and {d_state, d_inner, n_seqs}
- struct ggml_tensor * y_ssm = ggml_ssm_scan(ctx, ssm, x, dt, model.layers[il].ssm_a, B, C);
- // store last states
- ggml_build_forward_expand(graph,
- ggml_cpy(ctx,
- ggml_view_1d(ctx, y_ssm, d_state*d_inner*n_seqs, x->nb[3]),
- ggml_view_1d(ctx, ssm_states_all, d_state*d_inner*n_seqs, kv_head*d_state*d_inner*ggml_element_size(ssm_states_all))));
- struct ggml_tensor * y = ggml_view_3d(ctx, y_ssm, d_inner, n_seq_tokens, n_seqs, x->nb[1], x->nb[2], 0);
- // TODO: skip computing output earlier for unused tokens
- // {d_inner, n_seq_tokens, n_seqs} * {d_inner} => {d_inner, n_seq_tokens, n_seqs}
- y = ggml_add(ctx, y, ggml_mul(ctx, x, model.layers[il].ssm_d));
- y = ggml_mul(ctx, y, ggml_silu(ctx, ggml_cont(ctx, z)));
- // {d_inner, n_embd} @ {d_inner, n_seq_tokens, n_seqs} => {n_embd, n_seq_tokens, n_seqs}
- cur = llm_build_lora_mm(lctx, ctx, model.layers[il].ssm_out, y);
- }
- // {n_embd, n_seq_tokens, n_seqs} => {n_embd, n_tokens}
- cur = ggml_reshape_2d(ctx, cur, cur->ne[0], n_seq_tokens * n_seqs);
- cb(cur, "mamba_out", il);
- return cur;
- }
- static struct ggml_tensor * llm_build_rwkv6_time_mix(
- struct llama_context & lctx,
- struct ggml_context * ctx,
- const struct llama_layer * layer,
- struct ggml_tensor * cur,
- struct ggml_tensor * x_prev,
- struct ggml_tensor ** wkv_state) {
- size_t n_embd = cur->ne[0];
- size_t n_seq_tokens = cur->ne[1];
- size_t n_seqs = cur->ne[2];
- size_t head_size = layer->time_mix_first->ne[0];
- size_t head_count = layer->time_mix_first->ne[1];
- size_t n_tokens = n_seqs * n_seq_tokens;
- struct ggml_tensor * sx = ggml_sub(ctx, x_prev, cur);
- sx = ggml_reshape_2d(ctx, sx, n_embd, n_tokens);
- cur = ggml_reshape_2d(ctx, cur, n_embd, n_tokens);
- struct ggml_tensor * xxx = ggml_add(ctx, ggml_mul(ctx, sx, layer->time_mix_lerp_x), cur);
- xxx = ggml_reshape_4d(
- ctx,
- ggml_tanh(
- ctx,
- ggml_mul_mat(ctx, layer->time_mix_w1, xxx)
- ),
- layer->time_mix_w1->ne[1] / 5, 1, 5, n_tokens
- );
- xxx = ggml_cont(ctx, ggml_permute(ctx, xxx, 0, 1, 3, 2));
- xxx = ggml_mul_mat(
- ctx,
- ggml_reshape_4d(
- ctx,
- layer->time_mix_w2,
- layer->time_mix_w2->ne[0], layer->time_mix_w2->ne[1], 1, 5
- ),
- xxx
- );
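- // xxx now stacks the five interpolation deltas (for w, k, v, r, g), each n_embd x n_tokens; the views below pick them apart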
- struct ggml_tensor *mw = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], 0);
- struct ggml_tensor *mk = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * sizeof(float));
- struct ggml_tensor *mv = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 2 * sizeof(float));
- struct ggml_tensor *mr = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 3 * sizeof(float));
- struct ggml_tensor *mg = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 4 * sizeof(float));
- struct ggml_tensor * xw = ggml_add(
- ctx,
- ggml_mul(
- ctx,
- ggml_add(ctx, mw, layer->time_mix_lerp_w),
- sx
- ),
- cur
- );
- struct ggml_tensor * xk = ggml_add(
- ctx,
- ggml_mul(
- ctx,
- ggml_add(ctx, mk, layer->time_mix_lerp_k),
- sx
- ),
- cur
- );
- struct ggml_tensor * xv = ggml_add(
- ctx,
- ggml_mul(
- ctx,
- ggml_add(ctx, mv, layer->time_mix_lerp_v),
- sx
- ),
- cur
- );
- struct ggml_tensor * xr = ggml_add(
- ctx,
- ggml_mul(
- ctx,
- ggml_add(ctx, mr, layer->time_mix_lerp_r),
- sx
- ),
- cur
- );
- struct ggml_tensor * xg = ggml_add(
- ctx,
- ggml_mul(
- ctx,
- ggml_add(ctx, mg, layer->time_mix_lerp_g),
- sx
- ),
- cur
- );
- struct ggml_tensor * r = ggml_reshape_4d(ctx, llm_build_lora_mm(lctx, ctx, layer->time_mix_receptance, xr), head_size, 1, head_count, n_tokens);
- struct ggml_tensor * k = ggml_reshape_4d(ctx, llm_build_lora_mm(lctx, ctx, layer->time_mix_key, xk), 1, head_size, head_count, n_tokens);
- struct ggml_tensor * v = ggml_reshape_4d(ctx, llm_build_lora_mm(lctx, ctx, layer->time_mix_value, xv), head_size, 1, head_count, n_tokens);
- struct ggml_tensor * g = ggml_silu(
- ctx,
- llm_build_lora_mm(lctx, ctx, layer->time_mix_gate, xg)
- );
- struct ggml_tensor * w = ggml_mul_mat(
- ctx,
- layer->time_mix_decay_w2,
- ggml_tanh(
- ctx,
- ggml_mul_mat(ctx, layer->time_mix_decay_w1, xw)
- )
- );
- w = ggml_add(ctx, w, ggml_reshape_1d(ctx, layer->time_mix_decay, n_embd));
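- // decay: w = exp(-exp(w)), which keeps each per-channel decay factor in (0, 1)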
- w = ggml_exp(ctx, ggml_neg(ctx, ggml_exp(ctx, w)));
- w = ggml_reshape_4d(ctx, w, 1, head_size, head_count, n_tokens);
- k = ggml_transpose(ctx, k);
- v = ggml_transpose(ctx, v);
- r = ggml_transpose(ctx, r);
- struct ggml_tensor * wkv_output = ggml_rwkv_wkv(ctx, k, v, r, layer->time_mix_first, w, *wkv_state);
- cur = ggml_view_1d(ctx, wkv_output, n_embd * n_tokens, 0);
- *wkv_state = ggml_view_1d(ctx, wkv_output, n_embd * head_size * n_seqs, n_embd * n_tokens * sizeof(float));
- // group norm with head_count groups
- cur = ggml_reshape_3d(ctx, cur, n_embd / head_count, head_count, n_tokens);
- cur = ggml_norm(ctx, cur, 64e-5f);
- // Convert back to regular vectors.
- cur = ggml_reshape_2d(ctx, cur, n_embd, n_tokens);
- cur = ggml_add(ctx, ggml_mul(ctx, cur, layer->time_mix_ln), layer->time_mix_ln_b);
- cur = ggml_mul(ctx, cur, g);
- cur = llm_build_lora_mm(lctx, ctx, layer->time_mix_output, cur);
- return ggml_reshape_3d(ctx, cur, n_embd, n_seq_tokens, n_seqs);
- }
- static struct ggml_tensor * llm_build_rwkv6_channel_mix(
- struct llama_context & lctx,
- struct ggml_context * ctx,
- const struct llama_layer * layer,
- struct ggml_tensor * cur,
- struct ggml_tensor * x_prev) {
- struct ggml_tensor * sx = ggml_sub(ctx, x_prev, cur);
- struct ggml_tensor * xk = ggml_add(ctx, ggml_mul(ctx, sx, layer->channel_mix_lerp_k), cur);
- struct ggml_tensor * xr = ggml_add(ctx, ggml_mul(ctx, sx, layer->channel_mix_lerp_r), cur);
- struct ggml_tensor * r = ggml_sigmoid(ctx, llm_build_lora_mm(lctx, ctx, layer->channel_mix_receptance, xr));
- struct ggml_tensor * k = ggml_sqr(
- ctx,
- ggml_relu(
- ctx,
- llm_build_lora_mm(lctx, ctx, layer->channel_mix_key, xk)
- )
- );
- return ggml_mul(ctx, r, llm_build_lora_mm(lctx, ctx, layer->channel_mix_value, k));
- }
- struct llm_build_context {
- const llama_model & model;
- llama_context & lctx;
- const llama_hparams & hparams;
- const llama_cparams & cparams;
- const llama_ubatch & batch;
- const llama_kv_cache & kv_self;
- const int64_t n_embd;
- const int64_t n_layer;
- const int64_t n_rot;
- const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train)
- const int64_t n_head;
- const int64_t n_head_kv;
- const int64_t n_embd_head_k;
- const int64_t n_embd_k_gqa;
- const int64_t n_embd_head_v;
- const int64_t n_embd_v_gqa;
- const int64_t n_expert;
- const int64_t n_expert_used;
- const float freq_base;
- const float freq_scale;
- const float ext_factor;
- const float attn_factor;
- const float beta_fast;
- const float beta_slow;
- const float norm_eps;
- const float norm_rms_eps;
- const int32_t n_tokens;
- const int32_t n_kv; // size of KV cache to consider (n_kv <= kv_self.size)
- const int32_t n_outputs;
- const int32_t n_outputs_enc;
- const int32_t kv_head; // index of where we store new KV data in the cache
- const int32_t n_ctx_orig;
- const bool flash_attn;
- const enum llama_pooling_type pooling_type;
- const enum llama_rope_type rope_type;
- const llm_build_cb & cb;
- std::vector<uint8_t> & buf_compute_meta;
- struct ggml_context * ctx0 = nullptr;
- // TODO: consider making the entire interface noexcept
- llm_build_context(
- llama_context & lctx,
- const llama_ubatch & batch,
- const llm_build_cb & cb,
- bool worst_case) :
- model (lctx.model),
- lctx (lctx),
- hparams (model.hparams),
- cparams (lctx.cparams),
- batch (batch),
- kv_self (lctx.kv_self),
- n_embd (hparams.n_embd),
- n_layer (hparams.n_layer),
- n_rot (hparams.n_rot),
- n_ctx (cparams.n_ctx),
- n_head (hparams.n_head()),
- n_head_kv (hparams.n_head_kv()),
- n_embd_head_k (hparams.n_embd_head_k),
- n_embd_k_gqa (hparams.n_embd_k_gqa()),
- n_embd_head_v (hparams.n_embd_head_v),
- n_embd_v_gqa (hparams.n_embd_v_gqa()),
- n_expert (hparams.n_expert),
- n_expert_used (hparams.n_expert_used),
- freq_base (cparams.rope_freq_base),
- freq_scale (cparams.rope_freq_scale),
- ext_factor (cparams.yarn_ext_factor),
- attn_factor (cparams.yarn_attn_factor),
- beta_fast (cparams.yarn_beta_fast),
- beta_slow (cparams.yarn_beta_slow),
- norm_eps (hparams.f_norm_eps),
- norm_rms_eps (hparams.f_norm_rms_eps),
- n_tokens (batch.n_tokens),
- n_kv (worst_case ? kv_self.size : kv_self.n),
- n_outputs (worst_case ? n_tokens : lctx.n_outputs),
- n_outputs_enc (worst_case ? n_tokens : lctx.embd_enc.size() / hparams.n_embd),
- kv_head (worst_case ? (kv_self.recurrent ? 0 : kv_self.size - n_tokens) : kv_self.head),
- n_ctx_orig (cparams.n_ctx_orig_yarn),
- flash_attn (cparams.flash_attn),
- pooling_type (cparams.pooling_type),
- rope_type (hparams.rope_type),
- cb (cb),
- buf_compute_meta (lctx.buf_compute_meta) {
- // all initializations should be done in init()
- }
- void init() {
- struct ggml_init_params params = {
- /*.mem_size =*/ buf_compute_meta.size(),
- /*.mem_buffer =*/ buf_compute_meta.data(),
- /*.no_alloc =*/ true,
- };
- ctx0 = ggml_init(params);
- lctx.inp_tokens = nullptr;
- lctx.inp_embd = nullptr;
- lctx.inp_pos = nullptr;
- lctx.inp_out_ids = nullptr;
- lctx.inp_KQ_mask = nullptr;
- lctx.inp_KQ_mask_swa = nullptr;
- lctx.inp_K_shift = nullptr;
- lctx.inp_mean = nullptr;
- lctx.inp_cls = nullptr;
- lctx.inp_s_copy = nullptr;
- lctx.inp_s_mask = nullptr;
- lctx.inp_s_seq = nullptr;
- lctx.inp_pos_bucket = nullptr;
- lctx.inp_embd_enc = nullptr;
- lctx.inp_KQ_mask_cross = nullptr;
- lctx.inp_cross_attn_state = nullptr;
- }
- void free() {
- if (ctx0) {
- ggml_free(ctx0);
- ctx0 = nullptr;
- }
- }
- struct ggml_cgraph * build_k_shift() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- GGML_ASSERT(kv_self.size == n_ctx);
- lctx.inp_K_shift = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_ctx);
- cb(lctx.inp_K_shift, "K_shift", -1);
- ggml_set_input(lctx.inp_K_shift);
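- // one shift value per KV cell; the cached K below is re-rotated by this amount via RoPE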
- for (int il = 0; il < n_layer; ++il) {
- const int64_t n_head_kv = hparams.n_head_kv(il);
- const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
- struct ggml_tensor * rope_factors = build_rope_factors(il);
- struct ggml_tensor * k =
- ggml_view_3d(ctx0, kv_self.k_l[il],
- n_embd_head_k, n_head_kv, n_ctx,
- ggml_row_size(kv_self.k_l[il]->type, n_embd_head_k),
- ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa),
- 0);
- struct ggml_tensor * tmp;
- if (ggml_is_quantized(k->type)) {
- // dequantize to f32 -> RoPE -> quantize back
- tmp = ggml_cast(ctx0, k, GGML_TYPE_F32);
- cb(tmp, "K_f32", il);
- for (auto * backend : lctx.backends) {
- // figure out which backend the KV cache for this layer belongs to
- if (ggml_backend_supports_buft(backend, lctx.model.buft_layer[il].buft)) {
- ggml_backend_sched_set_tensor_backend(lctx.sched, tmp, backend);
- break;
- }
- }
- tmp = ggml_rope_ext_inplace(ctx0, tmp,
- lctx.inp_K_shift, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow);
- cb(tmp, "K_shifted_f32", il);
- tmp = ggml_cpy(ctx0, tmp, k);
- } else {
- // we rotate only the first n_rot dimensions
- tmp = ggml_rope_ext_inplace(ctx0, k,
- lctx.inp_K_shift, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow);
- }
- cb(tmp, "K_shifted", il);
- ggml_build_forward_expand(gf, tmp);
- }
- return gf;
- }
- struct ggml_cgraph * build_defrag(const std::vector<uint32_t> & ids) {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- for (uint32_t i = 0; i < ids.size(); ++i) {
- const uint32_t id = ids[i];
- if (i == id || id == ids.size()) {
- continue;
- }
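- // count how many consecutive cells can be moved together (their destination ids are consecutive as well)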
- uint32_t nm = 1;
- while (i + nm < ids.size() && ids[i + nm] == id + nm) {
- nm++;
- }
- for (int il = 0; il < n_layer; ++il) {
- const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
- const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
- ggml_tensor * view_k_src = ggml_view_2d(ctx0, kv_self.k_l[il],
- n_embd_k_gqa, nm,
- ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa),
- ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*i));
- ggml_tensor * view_k_dst = ggml_view_2d(ctx0, kv_self.k_l[il],
- n_embd_k_gqa, nm,
- ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa),
- ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*id));
- ggml_tensor * view_v_src;
- ggml_tensor * view_v_dst;
- if (flash_attn) {
- // NOTE: the V cache is not transposed when using flash attention
- view_v_src = ggml_view_2d(ctx0, kv_self.v_l[il],
- n_embd_v_gqa, nm,
- ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa),
- ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*i));
- view_v_dst = ggml_view_2d(ctx0, kv_self.v_l[il],
- n_embd_v_gqa, nm,
- ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa),
- ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa*id));
- } else {
- view_v_src = ggml_view_2d(ctx0, kv_self.v_l[il],
- nm, n_embd_v_gqa,
- ggml_row_size(kv_self.v_l[il]->type, kv_self.size),
- ggml_row_size(kv_self.v_l[il]->type, i));
- view_v_dst = ggml_view_2d(ctx0, kv_self.v_l[il],
- nm, n_embd_v_gqa,
- ggml_row_size(kv_self.v_l[il]->type, kv_self.size),
- ggml_row_size(kv_self.v_l[il]->type, id));
- }
- ggml_build_forward_expand(gf, ggml_cpy(ctx0, view_k_src, view_k_dst));
- ggml_build_forward_expand(gf, ggml_cpy(ctx0, view_v_src, view_v_dst));
- }
- i += nm - 1;
- }
- //LLAMA_LOG_INFO("gf->n_nodes = %d\n", gf->n_nodes);
- return gf;
- }
- struct ggml_tensor * build_inp_pos() {
- lctx.inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
- cb(lctx.inp_pos, "inp_pos", -1);
- ggml_set_input(lctx.inp_pos);
- return lctx.inp_pos;
- }
- struct ggml_tensor * build_rope_factors(int il) {
- // choose long/short freq factors based on the context size
- const auto n_ctx_pre_seq = cparams.n_ctx / cparams.n_seq_max;
- if (model.layers[il].rope_freqs != nullptr) {
- return model.layers[il].rope_freqs;
- }
- if (n_ctx_pre_seq > hparams.n_ctx_orig_yarn) {
- return model.layers[il].rope_long;
- }
- return model.layers[il].rope_short;
- }
- struct ggml_tensor * build_inp_out_ids() {
- lctx.inp_out_ids = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_outputs);
- cb(lctx.inp_out_ids, "inp_out_ids", -1);
- ggml_set_input(lctx.inp_out_ids);
- return lctx.inp_out_ids;
- }
- struct ggml_tensor * build_inp_KQ_mask(bool causal = true) {
- lctx.inp_KQ_mask = causal
- ? ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD))
- : ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
- cb(lctx.inp_KQ_mask, "KQ_mask", -1);
- ggml_set_input(lctx.inp_KQ_mask);
- return flash_attn ? ggml_cast(ctx0, lctx.inp_KQ_mask, GGML_TYPE_F16) : lctx.inp_KQ_mask;
- }
- struct ggml_tensor * build_inp_KQ_mask_swa(bool causal = true) {
- GGML_ASSERT(hparams.n_swa > 0);
- lctx.inp_KQ_mask_swa = causal
- ? ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD))
- : ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
- cb(lctx.inp_KQ_mask_swa, "KQ_mask_swa", -1);
- ggml_set_input(lctx.inp_KQ_mask_swa);
- return flash_attn ? ggml_cast(ctx0, lctx.inp_KQ_mask_swa, GGML_TYPE_F16) : lctx.inp_KQ_mask_swa;
- }
- struct ggml_tensor * build_inp_mean() {
- lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens);
- cb(lctx.inp_mean, "inp_mean", -1);
- ggml_set_input(lctx.inp_mean);
- return lctx.inp_mean;
- }
- struct ggml_tensor * build_inp_cls() {
- lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
- cb(lctx.inp_cls, "inp_cls", -1);
- ggml_set_input(lctx.inp_cls);
- return lctx.inp_cls;
- }
- struct ggml_tensor * build_inp_s_copy() {
- lctx.inp_s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_kv);
- cb(lctx.inp_s_copy, "inp_s_copy", -1);
- ggml_set_input(lctx.inp_s_copy);
- return lctx.inp_s_copy;
- }
- struct ggml_tensor * build_inp_s_mask() {
- lctx.inp_s_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 1, n_kv);
- cb(lctx.inp_s_mask, "inp_s_mask", -1);
- ggml_set_input(lctx.inp_s_mask);
- return lctx.inp_s_mask;
- }
- struct ggml_cgraph * append_pooling(struct ggml_cgraph * gf) {
- // find result_norm tensor for input
- struct ggml_tensor * inp = nullptr;
- for (int i = ggml_graph_n_nodes(gf) - 1; i >= 0; --i) {
- inp = ggml_graph_node(gf, i);
- if (strcmp(inp->name, "result_norm") == 0 || strcmp(inp->name, "result_embd") == 0) {
- break;
- } else {
- inp = nullptr;
- }
- }
- GGML_ASSERT(inp != nullptr && "missing result_norm/result_embd tensor");
- struct ggml_tensor * cur;
- switch (pooling_type) {
- case LLAMA_POOLING_TYPE_NONE:
- {
- cur = inp;
- } break;
- case LLAMA_POOLING_TYPE_MEAN:
- {
- struct ggml_tensor * inp_mean = build_inp_mean();
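- // inp_mean is expected to hold the per-sequence mean-pooling weights, set when the inputs are prepared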
- cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, inp)), inp_mean);
- } break;
- case LLAMA_POOLING_TYPE_CLS:
- case LLAMA_POOLING_TYPE_LAST:
- {
- struct ggml_tensor * inp_cls = build_inp_cls();
- cur = ggml_get_rows(ctx0, inp, inp_cls);
- } break;
- case LLAMA_POOLING_TYPE_RANK:
- {
- struct ggml_tensor * inp_cls = build_inp_cls();
- inp = ggml_get_rows(ctx0, inp, inp_cls);
- // classification head
- // https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/roberta/modeling_roberta.py#L1566
- GGML_ASSERT(model.cls != nullptr);
- GGML_ASSERT(model.cls_b != nullptr);
- cur = ggml_add (ctx0, ggml_mul_mat(ctx0, model.cls, inp), model.cls_b);
- cur = ggml_tanh(ctx0, cur);
- // some models don't have `cls_out`, for example: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
- // https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/blob/cb5347e43979c3084a890e3f99491952603ae1b7/modeling_bert.py#L884-L896
- if (model.cls_out) {
- GGML_ASSERT(model.cls_out_b != nullptr);
- cur = ggml_add (ctx0, ggml_mul_mat(ctx0, model.cls_out, cur), model.cls_out_b);
- }
- } break;
- default:
- {
- GGML_ABORT("unknown pooling type");
- }
- }
- cb(cur, "result_embd_pooled", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_tensor * llm_build_pos_bucket(bool causal) {
- if (causal) {
- lctx.inp_pos_bucket = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_kv, n_tokens);
- } else {
- lctx.inp_pos_bucket = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_tokens, n_tokens);
- }
- ggml_set_input(lctx.inp_pos_bucket);
- cb(lctx.inp_pos_bucket, "pos_bucket", -1);
- return lctx.inp_pos_bucket;
- }
- struct ggml_tensor * llm_build_pos_bias(struct ggml_tensor * pos_bucket, struct ggml_tensor * attn_rel_b) {
- struct ggml_tensor * pos_bucket_1d = ggml_view_1d(ctx0, pos_bucket, pos_bucket->ne[0] * pos_bucket->ne[1], 0);
- cb(pos_bucket_1d, "pos_bucket_1d", -1);
- struct ggml_tensor * pos_bias = ggml_get_rows(ctx0, attn_rel_b, pos_bucket_1d);
- cb(pos_bias, "pos_bias", -1);
- pos_bias = ggml_view_3d(ctx0, pos_bias, pos_bias->ne[0], lctx.inp_pos_bucket->ne[0], lctx.inp_pos_bucket->ne[1], ggml_element_size(pos_bias) * pos_bias->ne[0], ggml_element_size(pos_bias) * pos_bias->ne[0] * lctx.inp_pos_bucket->ne[0], 0);
- cb(pos_bias, "pos_bias", -1);
- pos_bias = ggml_permute(ctx0, pos_bias, 2, 0, 1, 3);
- cb(pos_bias, "pos_bias", -1);
- pos_bias = ggml_cont(ctx0, pos_bias);
- cb(pos_bias, "pos_bias", -1);
- return pos_bias;
- }
- struct ggml_tensor * llm_build_inp_embd_enc() {
- const int64_t n_embd = hparams.n_embd;
- lctx.inp_embd_enc = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_outputs_enc);
- ggml_set_input(lctx.inp_embd_enc);
- cb(lctx.inp_embd_enc, "embd_enc", -1);
- return lctx.inp_embd_enc;
- }
- struct ggml_tensor * llm_build_inp_KQ_mask_cross() {
- lctx.inp_KQ_mask_cross = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_outputs_enc, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
- ggml_set_input(lctx.inp_KQ_mask_cross);
- cb(lctx.inp_KQ_mask_cross, "KQ_mask_cross", -1);
- return lctx.inp_KQ_mask_cross;
- }
- struct ggml_cgraph * build_llama() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- // mutable variable, needed during the last layer of the computation to skip unused tokens
- int32_t n_tokens = this->n_tokens;
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // rope freq factors for llama3; may return nullptr for llama2 and other models
- struct ggml_tensor * rope_factors = build_rope_factors(il);
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- if (model.layers[il].bq) {
- Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- cb(Qcur, "Qcur", il);
- }
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- if (model.layers[il].bk) {
- Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- cb(Kcur, "Kcur", il);
- }
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- if (model.layers[il].bv) {
- Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- cb(Vcur, "Vcur", il);
- }
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- n_tokens = n_outputs;
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- // For Granite architecture
- if (hparams.f_residual_scale) {
- cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- if (model.layers[il].ffn_gate_inp == nullptr) {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- } else {
- // MoE branch
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_moe_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_gate_inp,
- model.layers[il].ffn_up_exps,
- model.layers[il].ffn_gate_exps,
- model.layers[il].ffn_down_exps,
- n_expert, n_expert_used,
- LLM_FFN_SILU, true,
- false, 0.0,
- cb, il);
- cb(cur, "ffn_moe_out", il);
- }
- // For Granite architecture
- if (hparams.f_residual_scale) {
- cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cb(cur, "ffn_out", il);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- // For Granite architecture
- if (hparams.f_logit_scale) {
- cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale);
- }
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_baichuan() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = model.type == MODEL_7B ? build_inp_pos() : nullptr;
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- switch (model.type) {
- case MODEL_7B:
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- break;
- case MODEL_13B:
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens);
- break;
- default:
- GGML_ABORT("fatal error");
- }
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_mllama() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- // mutable variable, needed during the last layer of the computation to skip unused tokens
- int32_t n_tokens = this->n_tokens;
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- struct ggml_tensor * inpCAS;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- inpCAS = llm_build_inp_cross_attn_state(ctx0, lctx, hparams, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- if (hparams.cross_attention_layers(il)) {
- if (!batch.embd && !cparams.cross_attn) {
- continue;
- }
- // cross attention layer
- struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].cross_attn_q_proj, cur);
- cb(Qcur, "Qcur", il);
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- cb(Qcur, "Qcur", il);
- Qcur = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 0, 2, 1, 3));
- cb(Qcur, "Qcur", il);
- Qcur = llm_build_norm(ctx0, Qcur, hparams, model.layers[il].cross_attn_q_norm, NULL, LLM_NORM_RMS, cb, il);
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur, * Vcur;
- if (batch.embd) {
- Kcur = ggml_mul_mat(ctx0, model.layers[il].cross_attn_k_proj, inpCAS);
- cb(Kcur, "Kcur", il);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, 6404);
- cb(Kcur, "Kcur", il);
- Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3));
- cb(Kcur, "Kcur", il);
- Kcur = llm_build_norm(ctx0, Kcur, hparams, model.layers[il].cross_attn_k_norm, NULL, LLM_NORM_RMS, cb, il);
- cb(Kcur, "Kcur", il);
- ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, kv_self.k_l[il]));
- Vcur = ggml_mul_mat(ctx0, model.layers[il].cross_attn_v_proj, inpCAS);
- cb(Vcur, "Vcur", il);
- Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, 6404);
- cb(Vcur, "Vcur", il);
- Vcur = ggml_permute(ctx0, Vcur, 0, 2, 1, 3);
- cb(Vcur, "Vcur", il);
- ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, kv_self.v_l[il]));
- } else {
- Kcur = ggml_view_tensor(ctx0, kv_self.k_l[il]);
- cb(Kcur, "Kcur (view)", il);
- Vcur = ggml_view_tensor(ctx0, kv_self.v_l[il]);
- cb(Vcur, "Vcur (view)", il);
- }
- struct ggml_tensor * kq = ggml_mul_mat(ctx0, Kcur, Qcur);
- cb(kq, "kq", il);
- // TODO: apply causal masks
- struct ggml_tensor * kq_soft_max = ggml_soft_max_ext(ctx0, kq, nullptr, 1.f/sqrtf(float(n_embd_head)), hparams.f_max_alibi_bias);
- cb(kq_soft_max, "kq_soft_max", il);
- Vcur = ggml_cont(ctx0, ggml_transpose(ctx0, Vcur));
- cb(Vcur, "Vcur", il);
- struct ggml_tensor * kqv = ggml_mul_mat(ctx0, Vcur, kq_soft_max);
- cb(kqv, "kqv", il);
- struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
- cb(kqv_merged, "kqv_merged", il);
- cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_head_v*n_head, n_tokens);
- cb(cur, "kqv_merged_cont", il);
- cur = ggml_mul_mat(ctx0, model.layers[il].cross_attn_o_proj, cur);
- cb(cur, "cur", il);
- // TODO: do this in place once?
- cur = ggml_mul(ctx0, cur, ggml_tanh(ctx0, model.layers[il].cross_attn_attn_gate));
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- // TODO: do this inplace once?
- cur = ggml_add_inplace(ctx0, ggml_mul_inplace(ctx0, cur, ggml_tanh(ctx0, model.layers[il].cross_attn_mlp_gate)), ffn_inp);
- cb(cur, "ffn_out", il);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- } else {
- // self attention layer
- // rope freq factors for llama3; may return nullptr for llama2 and other models
- struct ggml_tensor * rope_factors = build_rope_factors(il);
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- if (model.layers[il].bq) {
- Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- cb(Qcur, "Qcur", il);
- }
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- if (model.layers[il].bk) {
- Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- cb(Kcur, "Kcur", il);
- }
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- if (model.layers[il].bv) {
- Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- cb(Vcur, "Vcur", il);
- }
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- n_tokens = n_outputs;
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- cur = ggml_add(ctx0, cur, ffn_inp);
- cb(cur, "ffn_out", il);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_xverse() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams, model.output_norm, NULL, LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_falcon() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * attn_norm;
- attn_norm = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm,
- model.layers[il].attn_norm_b,
- LLM_NORM, cb, il);
- cb(attn_norm, "attn_norm", il);
- // self-attention
- {
- if (model.layers[il].attn_norm_2) {
- // Falcon-40B
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm_2,
- model.layers[il].attn_norm_2_b,
- LLM_NORM, cb, il);
- cb(cur, "attn_norm_2", il);
- } else {
- cur = attn_norm;
- }
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
- struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
- struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
- struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
- // NeoX-style RoPE (rope mode = 2)
- Qcur = ggml_rope_ext(
- ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
- freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
- freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- attn_norm = ggml_get_rows(ctx0, attn_norm, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = cur;
- // feed forward
- {
- cur = llm_build_ffn(ctx0, lctx, attn_norm, // !! use the attn norm, not the result
- model.layers[il].ffn_up, NULL, NULL,
- NULL, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = ggml_add(ctx0, cur, inpL);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- // norm
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm,
- model.output_norm_b,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_grok() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- // mutable variable, needed during the last layer of the computation to skip unused tokens
- int32_t n_tokens = this->n_tokens;
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // multiply by embedding_multiplier_scale of 78.38367176906169 (sqrt(n_embd) for n_embd = 6144)
- inpL = ggml_scale(ctx0, inpL, 78.38367176906169f);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- if (model.layers[il].bq) {
- Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- cb(Qcur, "Qcur", il);
- }
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- if (model.layers[il].bk) {
- Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- cb(Kcur, "Kcur", il);
- }
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- if (model.layers[il].bv) {
- Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- cb(Vcur, "Vcur", il);
- }
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- n_tokens = n_outputs;
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- // Grok
- // if attn_out_norm is present then apply it before adding the input
- if (model.layers[il].attn_out_norm) {
- cur = llm_build_norm(ctx0, cur, hparams,
- model.layers[il].attn_out_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_out_norm", il);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- // MoE branch
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_moe_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_gate_inp,
- model.layers[il].ffn_up_exps,
- model.layers[il].ffn_gate_exps,
- model.layers[il].ffn_down_exps,
- n_expert, n_expert_used,
- LLM_FFN_GELU, true,
- false, 0.0,
- cb, il);
- cb(cur, "ffn_moe_out", il);
- // Grok
- // if layer_out_norm is present then apply it before adding the input
- // Idea: maybe ffn_out_norm is a better name
- if (model.layers[il].layer_out_norm) {
- cur = llm_build_norm(ctx0, cur, hparams,
- model.layers[il].layer_out_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "layer_out_norm", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cb(cur, "ffn_out", il);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- // Grok
- // multiply logits by output_multiplier_scale of 0.5773502691896257 (1/sqrt(3))
- cur = ggml_scale(ctx0, cur, 0.5773502691896257f);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_dbrx() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- // mutable variable, needed during the last layer of the computation to skip unused tokens
- int32_t n_tokens = this->n_tokens;
- const int64_t n_embd_head = hparams.n_embd_head_v;
- const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- struct ggml_tensor * Qcur = nullptr;
- struct ggml_tensor * Kcur = nullptr;
- struct ggml_tensor * Vcur = nullptr;
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
- cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
- cb(cur, "wqkv_clamped", il);
- Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
- Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
- Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- n_tokens = n_outputs;
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- // MoE branch
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].attn_out_norm, NULL,
- LLM_NORM, cb, il);
- cb(cur, "attn_out_norm", il);
- cur = llm_build_moe_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_gate_inp,
- model.layers[il].ffn_up_exps,
- model.layers[il].ffn_gate_exps,
- model.layers[il].ffn_down_exps,
- n_expert, n_expert_used,
- LLM_FFN_SILU, true,
- false, 0.0,
- cb, il);
- cb(cur, "ffn_moe_out", il);
- cur = ggml_add(ctx0, cur, ffn_inp);
- cb(cur, "ffn_out", il);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_starcoder() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- struct ggml_tensor * pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
- cb(pos, "pos_embd", -1);
- inpL = ggml_add(ctx0, inpL, pos);
- cb(inpL, "inpL", -1);
- for (int il = 0; il < n_layer; ++il) {
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm,
- model.layers[il].attn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
- cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
- cb(cur, "bqkv", il);
- struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
- struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
- struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- }
- // add the input
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
- cb(ffn_inp, "ffn_inp", il);
- // FF
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm,
- model.layers[il].ffn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- NULL, NULL, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.output_norm,
- model.output_norm_b,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_refact() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
- cb(Kcur, "Kcur", il);
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- cb(Qcur, "Qcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_bert() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- struct ggml_tensor * inp_pos = nullptr;
- if (model.arch != LLM_ARCH_JINA_BERT_V2) {
- inp_pos = build_inp_pos();
- }
- // construct input embeddings (token, type, position)
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // token types are hardcoded to zero ("Sentence A")
- struct ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0);
- inpL = ggml_add(ctx0, inpL, type_row0);
- if (model.arch == LLM_ARCH_BERT) {
- inpL = ggml_add(ctx0, ggml_get_rows(ctx0, model.pos_embd, inp_pos), inpL);
- }
- cb(inpL, "inp_embd", -1);
- // embed layer norm
- inpL = llm_build_norm(ctx0, inpL, hparams, model.tok_norm, model.tok_norm_b, LLM_NORM, cb, -1);
- cb(inpL, "inp_norm", -1);
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask(false);
- // iterate layers
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * cur = inpL;
- struct ggml_tensor * Qcur;
- struct ggml_tensor * Kcur;
- struct ggml_tensor * Vcur;
- // self-attention
- if (model.arch == LLM_ARCH_BERT || model.arch == LLM_ARCH_JINA_BERT_V2) {
- Qcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur), model.layers[il].bq);
- cb(Qcur, "Qcur", il);
- if (model.layers[il].attn_q_norm) {
- Qcur = llm_build_norm(ctx0, Qcur, hparams,
- model.layers[il].attn_q_norm,
- model.layers[il].attn_q_norm_b,
- LLM_NORM, cb, il);
- }
- Kcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur), model.layers[il].bk);
- cb(Kcur, "Kcur", il);
- if (model.layers[il].attn_k_norm) {
- Kcur = llm_build_norm(ctx0, Kcur, hparams,
- model.layers[il].attn_k_norm,
- model.layers[il].attn_k_norm_b,
- LLM_NORM, cb, il);
- }
- Vcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur), model.layers[il].bv);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
- } else {
- // compute Q and K and RoPE them
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
- Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
- Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
- Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- }
- struct ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
- struct ggml_tensor * k = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3));
- struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
- cb(kq, "kq", il);
- kq = ggml_soft_max_ext(ctx0, kq, KQ_mask, 1.0f/sqrtf(float(n_embd_head)), hparams.f_max_alibi_bias);
- cb(kq, "kq_soft_max_ext", il);
- struct ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_tokens)));
- cb(v, "v", il);
- struct ggml_tensor * kqv = ggml_mul_mat(ctx0, ggml_reshape_3d(ctx0, v, n_tokens, n_embd_head, n_head_kv), kq);
- cb(kqv, "kqv", il);
- struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
- cb(kqv_merged, "kqv_merged", il);
- cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens);
- cb(cur, "kqv_merged_cont", il);
- ggml_build_forward_expand(gf, cur);
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo, cur);
- if (model.layers[il].bo) {
- cb(cur, "kqv_wo", il);
- cur = ggml_add(ctx0, cur, model.layers[il].bo);
- }
- cb(cur, "kqv_out", il);
- if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- }
- // re-add the layer input
- cur = ggml_add(ctx0, cur, inpL);
- // attention layer norm
- cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_out_norm, model.layers[il].attn_out_norm_b, LLM_NORM, cb, il);
- if (model.layers[il].attn_norm_2 != nullptr) {
- cur = ggml_add(ctx0, cur, inpL); // re-add the layer input
- cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_norm_2, model.layers[il].attn_norm_2_b, LLM_NORM, cb, il);
- }
- struct ggml_tensor * ffn_inp = cur;
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- if (model.arch == LLM_ARCH_BERT) {
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- NULL, NULL, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
- } else if (model.arch == LLM_ARCH_JINA_BERT_V2) {
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_GELU, LLM_FFN_PAR, cb, il);
- } else {
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- }
- cb(cur, "ffn_out", il);
- // residual connection: the attention output bypasses the FFN
- cur = ggml_add(ctx0, cur, ffn_inp);
- // output layer norm
- cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].layer_out_norm, model.layers[il].layer_out_norm_b, LLM_NORM, cb, il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cb(cur, "result_embd", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_bloom() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- inpL = llm_build_norm(ctx0, inpL, hparams,
- model.tok_norm,
- model.tok_norm_b,
- LLM_NORM, cb, -1);
- cb(inpL, "inp_norm", -1);
- for (int il = 0; il < n_layer; ++il) {
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm,
- model.layers[il].attn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
- cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
- cb(cur, "bqkv", il);
- struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
- struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
- struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- }
- // Add the input
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
- cb(ffn_inp, "ffn_inp", il);
- // FF
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm,
- model.layers[il].ffn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- NULL, NULL, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.output_norm,
- model.output_norm_b,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_mpt() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * pos;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- if (model.pos_embd) {
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
- cb(pos, "pos_embd", -1);
- inpL = ggml_add(ctx0, inpL, pos);
- cb(inpL, "inpL", -1);
- }
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * attn_norm;
- attn_norm = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm,
- model.layers[il].attn_norm_b,
- LLM_NORM, cb, il);
- cb(attn_norm, "attn_norm", il);
- // self-attention
- {
- cur = attn_norm;
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
- if (model.layers[il].bqkv) {
- cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
- cb(cur, "bqkv", il);
- }
- if (hparams.f_clamp_kqv > 0.0f) {
- cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
- cb(cur, "wqkv_clamped", il);
- }
- struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
- struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
- struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
- // Q/K Layernorm
- if (model.layers[il].attn_q_norm) {
- Qcur = llm_build_norm(ctx0, Qcur, hparams,
- model.layers[il].attn_q_norm,
- model.layers[il].attn_q_norm_b,
- LLM_NORM, cb, il);
- cb(Qcur, "Qcur", il);
- Kcur = llm_build_norm(ctx0, Kcur, hparams,
- model.layers[il].attn_k_norm,
- model.layers[il].attn_k_norm_b,
- LLM_NORM, cb, il);
- cb(Kcur, "Kcur", il);
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- } else {
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- }
- // Add the input
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
- cb(ffn_inp, "ffn_inp", il);
- // feed forward
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm,
- model.layers[il].ffn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- NULL, NULL, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- model.layers[il].ffn_act,
- LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm,
- model.output_norm_b,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_stablelm() {
- struct ggml_cgraph * gf = ggml_new_graph(ctx0);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm,
- model.layers[il].attn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "attn_norm", il);
- struct ggml_tensor * inpSA = cur;
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- if (model.layers[il].bq) {
- Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- cb(Qcur, "Qcur", il);
- }
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- if (model.layers[il].bk) {
- Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- cb(Kcur, "Kcur", il);
- }
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- if (model.layers[il].bv) {
- Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- cb(Vcur, "Vcur", il);
- }
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- cb(Qcur, "Qcur", il);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
- cb(Kcur, "Kcur", il);
- if (model.layers[il].attn_q_norm) {
- Qcur = llm_build_norm(ctx0, Qcur, hparams,
- model.layers[il].attn_q_norm,
- NULL,
- LLM_NORM, cb, il);
- cb(Qcur, "Qcur", il);
- }
- if (model.layers[il].attn_k_norm) {
- Kcur = llm_build_norm(ctx0, Kcur, hparams,
- model.layers[il].attn_k_norm,
- NULL,
- LLM_NORM, cb, il);
- cb(Kcur, "Kcur", il);
- }
- Qcur = ggml_rope_ext(
- ctx0, Qcur, inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, Kcur, inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- {
- if (model.layers[il].ffn_norm) {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm,
- model.layers[il].ffn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "ffn_norm", il);
- } else {
- // parallel residual
- cur = inpSA;
- }
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm,
- model.output_norm_b,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_qwen() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
- cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
- cb(cur, "bqkv", il);
- struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
- struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
- struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 2*sizeof(float)*(n_embd)));
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
- // NeoX-style RoPE (rope mode = 2)
- Qcur = ggml_rope_ext(
- ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
- freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
- freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_qwen2() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- cb(Kcur, "Kcur", il);
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_qwen2moe() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- // mutable variable, needed during the last layer of the computation to skip unused tokens
- int32_t n_tokens = this->n_tokens;
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- cb(Kcur, "Kcur", il);
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- n_tokens = n_outputs;
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // MoE branch
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- ggml_tensor * moe_out =
- llm_build_moe_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_gate_inp,
- model.layers[il].ffn_up_exps,
- model.layers[il].ffn_gate_exps,
- model.layers[il].ffn_down_exps,
- n_expert, n_expert_used,
- LLM_FFN_SILU, false,
- false, 0.0,
- cb, il);
- cb(moe_out, "ffn_moe_out", il);
- // FFN shared expert
- {
- ggml_tensor * cur_gate_inp = llm_build_lora_mm(lctx, ctx0, model.layers[il].ffn_gate_inp_shexp, cur);
- cb(cur_gate_inp, "ffn_shexp_gate_inp", il);
- // sigmoid(x) computed as silu(x)/x, since silu(x) = x*sigmoid(x)
- ggml_tensor * cur_gate = ggml_div(ctx0, ggml_silu(ctx0, cur_gate_inp), cur_gate_inp);
- cb(cur_gate, "ffn_shexp_gate", il);
- ggml_tensor * cur_ffn = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up_shexp, NULL, NULL,
- model.layers[il].ffn_gate_shexp, NULL, NULL,
- model.layers[il].ffn_down_shexp, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur_ffn, "ffn_shexp", il);
- ggml_tensor * ffn_shexp_out = ggml_mul(ctx0, cur_ffn, cur_gate);
- cb(ffn_shexp_out, "ffn_shexp_out", il);
- moe_out = ggml_add(ctx0, moe_out, ffn_shexp_out);
- cb(moe_out, "ffn_out", il);
- cur = moe_out;
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_phi2() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * attn_norm_output;
- struct ggml_tensor * ffn_output;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- attn_norm_output = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm,
- model.layers[il].attn_norm_b,
- LLM_NORM, cb, il);
- cb(attn_norm_output, "attn_norm", il);
- // self-attention
- {
- struct ggml_tensor * Qcur = nullptr;
- struct ggml_tensor * Kcur = nullptr;
- struct ggml_tensor * Vcur = nullptr;
- if (model.layers[il].wqkv) {
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, attn_norm_output);
- cb(cur, "wqkv", il);
- cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
- cb(cur, "bqkv", il);
- Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
- Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
- Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
- } else {
- Qcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, attn_norm_output), model.layers[il].bq);
- Kcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, attn_norm_output), model.layers[il].bk);
- Vcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, attn_norm_output), model.layers[il].bv);
- }
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
- Qcur = ggml_rope_ext(
- ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
- freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- // with phi2, we scale the Q to avoid precision issues
- // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66
- Qcur = ggml_scale(ctx0, Qcur, 1.0f/sqrtf(float(n_embd_head)));
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
- freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- attn_norm_output = ggml_get_rows(ctx0, attn_norm_output, inp_out_ids);
- }
- // FF
- {
- ffn_output = llm_build_ffn(ctx0, lctx, attn_norm_output,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- NULL, NULL, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
- cb(ffn_output, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, ffn_output);
- cur = ggml_add(ctx0, cur, inpL);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.output_norm,
- model.output_norm_b,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output_no_bias", -1);
- cur = ggml_add(ctx0, cur, model.output_b);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_phi3() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa();
- for (int il = 0; il < n_layer; ++il) {
- auto residual = inpL;
- // self-attention
- {
- // rope freq factors for 128k context
- struct ggml_tensor * rope_factors = build_rope_factors(il);
- struct ggml_tensor * attn_norm_output = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm,
- NULL,
- LLM_NORM_RMS, cb, il);
- cb(attn_norm_output, "attn_norm", il);
- struct ggml_tensor * Qcur = nullptr;
- struct ggml_tensor * Kcur = nullptr;
- struct ggml_tensor * Vcur = nullptr;
- if (model.layers[il].wqkv) {
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, attn_norm_output);
- cb(cur, "wqkv", il);
- Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0 * sizeof(float) * (n_embd)));
- Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd)));
- Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd + n_embd_gqa)));
- } else {
- Qcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, attn_norm_output), model.layers[il].bq);
- Kcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, attn_norm_output), model.layers[il].bk);
- Vcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, attn_norm_output), model.layers[il].bv);
- }
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
- Qcur = ggml_rope_ext(
- ctx0, Qcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig,
- freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head)));
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, Kcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig,
- freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask_swa, n_tokens, kv_head, n_kv, 1.0f, cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- residual = ggml_get_rows(ctx0, residual, inp_out_ids);
- }
- cur = ggml_add(ctx0, cur, residual);
- residual = cur;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- // FF
- // special-case: the up and gate tensors are merged into a single tensor
- // TODO: support this in llm_build_ffn
- {
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- NULL, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SWIGLU, LLM_FFN_SEQ, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, residual, cur);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.output_norm,
- NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_plamo() {
- struct ggml_cgraph * gf = ggml_new_graph(ctx0);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- struct ggml_tensor * attention_norm = cur;
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_rot, n_head, n_tokens), inp_pos, nullptr,
- n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow);
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_rot, n_head_kv, n_tokens), inp_pos, nullptr,
- n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow);
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- struct ggml_tensor * sa_out = cur;
- cur = attention_norm;
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- sa_out = ggml_get_rows(ctx0, sa_out, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- }
- // feed-forward network
- {
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, sa_out);
- cur = ggml_add(ctx0, cur, inpL);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_gpt2() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * pos;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
- cb(pos, "pos_embd", -1);
- inpL = ggml_add(ctx0, inpL, pos);
- cb(inpL, "inpL", -1);
- for (int il = 0; il < n_layer; ++il) {
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm,
- model.layers[il].attn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
- cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
- cb(cur, "bqkv", il);
- struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
- struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
- struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
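- // the fused wqkv projection yields n_embd + 2*n_embd_gqa F32 values per token; Q, K and V
- // are recovered as 2D views of that buffer at byte offsets 0, n_embd*sizeof(float) and
- // (n_embd + n_embd_gqa)*sizeof(float) respectively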
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- }
- // add the input
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
- cb(ffn_inp, "ffn_inp", il);
- // FF
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm,
- model.layers[il].ffn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- NULL, NULL, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.output_norm,
- model.output_norm_b,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_codeshell() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm,
- model.layers[il].attn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
- cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
- cb(cur, "bqkv", il);
- struct ggml_tensor * tmpq = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
- struct ggml_tensor * tmpk = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
- struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
- cb(tmpq, "tmpq", il);
- cb(tmpk, "tmpk", il);
- cb(Vcur, "Vcur", il);
- struct ggml_tensor * Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- }
- // add the input
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
- cb(ffn_inp, "ffn_inp", il);
- // FF
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm,
- model.layers[il].ffn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- NULL, NULL, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.output_norm,
- model.output_norm_b,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_orion() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, model.layers[il].attn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- // if (model.layers[il].bq) {
- // Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- // cb(Qcur, "Qcur", il);
- // }
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- // if (model.layers[il].bk) {
- // Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- // cb(Kcur, "Kcur", il);
- // }
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- // if (model.layers[il].bv) {
- // Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- // cb(Vcur, "Vcur", il);
- // }
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, model.output_norm_b,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_internlm2() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- if (model.layers[il].bq) {
- Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- cb(Qcur, "Qcur", il);
- }
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- if (model.layers[il].bk) {
- Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- cb(Kcur, "Kcur", il);
- }
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- if (model.layers[il].bv) {
- Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- cb(Vcur, "Vcur", il);
- }
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- // ref: https://arxiv.org/abs/2203.03466
- // https://github.com/ggerganov/llama.cpp/issues/5276#issuecomment-1925774738
- // based on the original build_llama() function
- struct ggml_cgraph * build_minicpm() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- const int64_t n_embd = hparams.n_embd;
- //TODO: if the model varies, these parameters need to be read from the model
- const int64_t n_embd_base = 256;
- const float scale_embd = 12.0f;
- const float scale_depth = 1.4f;
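- // MiniCPM adds three extra scalings on top of the llama graph (constants hard-coded
- // above, see the TODO):
- //   input embeddings      : x *= scale_embd                     (12.0)
- //   each residual branch  : x *= scale_depth / sqrt(n_layer)
- //   hidden before lm_head : x *= n_embd_base / n_embd
- // e.g. for a 40-layer model the residual scale would be 1.4/sqrt(40) ~= 0.22
- // (40 is only an illustrative value, not read from the model here)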
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // scale the input embeddings
- inpL = ggml_scale(ctx0, inpL, scale_embd);
- cb(inpL, "inp_scaled", -1);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- if (model.layers[il].bq) {
- Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- cb(Qcur, "Qcur", il);
- }
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- if (model.layers[il].bk) {
- Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- cb(Kcur, "Kcur", il);
- }
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- if (model.layers[il].bv) {
- Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- cb(Vcur, "Vcur", il);
- }
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- // scale_res - scale the hidden states for residual connection
- const float scale_res = scale_depth/sqrtf(float(n_layer));
- cur = ggml_scale(ctx0, cur, scale_res);
- cb(cur, "hidden_scaled", il);
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- }
- // scale the hidden states for residual connection
- cur = ggml_scale(ctx0, cur, scale_res);
- cb(cur, "hidden_scaled_ffn", il);
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head scaling
- const float scale_lmhead = float(n_embd_base)/float(n_embd);
- cur = ggml_scale(ctx0, cur, scale_lmhead);
- cb(cur, "lmhead_scaling", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_minicpm3() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- //TODO: if the model varies, these parameters need to be read from the model
- const int64_t n_embd_base = 256;
- const float scale_embd = 12.0f;
- const float scale_depth = 1.4f;
- const float kq_scale = 1.0f / sqrtf(float(hparams.n_embd_head_k));
- const uint32_t n_embd_head_qk_rope = hparams.n_rot;
- const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
- const uint32_t kv_lora_rank = hparams.n_lora_kv;
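- // MiniCPM3 uses DeepSeek-style multi-head latent attention: Q goes through a low-rank
- // pair (wq_a, wq_b) and K/V are expanded from a kv_lora_rank-wide latent (wkv_a_mqa,
- // wkv_b); each head is split into a RoPE part of width n_embd_head_qk_rope and a
- // position-free part of width n_embd_head_qk_nope, and only the RoPE part is rotated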
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // scale the input embeddings
- inpL = ggml_scale(ctx0, inpL, scale_embd);
- cb(inpL, "inp_scaled", -1);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- struct ggml_tensor * rope_factors = build_rope_factors(il);
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- struct ggml_tensor * q = NULL;
- // {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens}
- q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur);
- cb(q, "q", il);
- q = llm_build_norm(ctx0, q, hparams,
- model.layers[il].attn_q_a_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(q, "q", il);
- // {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens}
- q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q);
- cb(q, "q", il);
- // split into {n_head * n_embd_head_qk_nope, n_tokens}
- struct ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens,
- ggml_row_size(q->type, hparams.n_embd_head_k),
- ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
- 0);
- cb(q_nope, "q_nope", il);
- // and {n_head * n_embd_head_qk_rope, n_tokens}
- struct ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens,
- ggml_row_size(q->type, hparams.n_embd_head_k),
- ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
- ggml_row_size(q->type, n_embd_head_qk_nope));
- cb(q_pe, "q_pe", il);
- // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens}
- struct ggml_tensor * kv_pe_compressed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
- cb(kv_pe_compressed, "kv_pe_compressed", il);
- // split into {kv_lora_rank, n_tokens}
- struct ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compressed, kv_lora_rank, n_tokens,
- kv_pe_compressed->nb[1],
- 0);
- cb(kv_compressed, "kv_compressed", il);
- // and {n_embd_head_qk_rope, n_tokens}
- struct ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compressed, n_embd_head_qk_rope, 1, n_tokens,
- kv_pe_compressed->nb[1],
- kv_pe_compressed->nb[1],
- ggml_row_size(kv_pe_compressed->type, kv_lora_rank));
- cb(k_pe, "k_pe", il);
- kv_compressed = ggml_cont(ctx0, kv_compressed); // TODO: the CUDA backend does not support non-contiguous norm
- kv_compressed = llm_build_norm(ctx0, kv_compressed, hparams,
- model.layers[il].attn_kv_a_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(kv_compressed, "kv_compressed", il);
- // {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens}
- struct ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed);
- cb(kv, "kv", il);
- // split into {n_head * n_embd_head_qk_nope, n_tokens}
- struct ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,
- ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v),
- ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)),
- 0);
- cb(k_nope, "k_nope", il);
- // and {n_head * n_embd_head_v, n_tokens}
- struct ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens,
- ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)),
- ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head),
- ggml_row_size(kv->type, (n_embd_head_qk_nope)));
- cb(v_states, "v_states", il);
- v_states = ggml_cont(ctx0, v_states);
- cb(v_states, "v_states", il);
- v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens,
- ggml_row_size(kv->type, hparams.n_embd_head_v * n_head),
- 0);
- cb(v_states, "v_states", il);
- q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend does not support non-contiguous RoPE
- q_pe = ggml_rope_ext(
- ctx0, q_pe, inp_pos, rope_factors,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(q_pe, "q_pe", il);
- // shared RoPE key
- k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend does not support non-contiguous RoPE
- k_pe = ggml_rope_ext(
- ctx0, k_pe, inp_pos, rope_factors,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(k_pe, "k_pe", il);
- struct ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0);
- cb(q_states, "q_states", il);
- struct ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0);
- cb(k_states, "k_states", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- k_states, v_states, q_states, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- // scale_res - scale the hidden states for residual connection
- const float scale_res = scale_depth/sqrtf(float(n_layer));
- cur = ggml_scale(ctx0, cur, scale_res);
- cb(cur, "hidden_scaled", il);
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- }
- // scale the hidden states for residual connection
- cur = ggml_scale(ctx0, cur, scale_res);
- cb(cur, "hidden_scaled_ffn", il);
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head scaling
- const float scale_lmhead = float(n_embd_base)/float(n_embd);
- cur = ggml_scale(ctx0, cur, scale_lmhead);
- cb(cur, "lmhead_scaling", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_gemma() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head_k = hparams.n_embd_head_k;
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
- cb(inpL, "inp_scaled", -1);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow);
- cb(Qcur, "Qcur", il);
- Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));
- cb(Qcur, "Qcur_scaled", il);
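- // Gemma folds the usual 1/sqrt(n_embd_head_k) attention scale into Q here, which is
- // why llm_build_kv below is invoked with kq_scale = 1.0f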
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow);
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- }
- struct ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
- cb(sa_out, "sa_out", il);
- cur = llm_build_norm(ctx0, sa_out, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- // feed-forward network
- {
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_GELU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, sa_out);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_gemma2() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head_k = hparams.n_embd_head_k;
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
- cb(inpL, "inp_scaled", -1);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- // gemma 2 requires different mask for layers using sliding window (SWA)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask(true);
- struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa(true);
- for (int il = 0; il < n_layer; ++il) {
- // even-indexed layers (il % 2 == 0) use SWA, the rest use the full causal mask
- struct ggml_tensor * KQ_mask_l = (il % 2 == 0) ? KQ_mask_swa : KQ_mask;
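- // SWA layers attend only to a fixed window of recent positions and therefore need the
- // dedicated KQ_mask_swa; the remaining layers keep the full causal mask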
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow);
- cb(Qcur, "Qcur", il);
- // ref: https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e
- switch (model.type) {
- case e_model::MODEL_2B:
- case e_model::MODEL_9B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k))); break;
- case e_model::MODEL_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break;
- default: GGML_ABORT("fatal error");
- };
- cb(Qcur, "Qcur_scaled", il);
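- // for the 27B variant the query scale is 1/sqrt(n_embd/n_head) (the query_pre_attn_scalar
- // of the reference implementation) rather than 1/sqrt(n_embd_head_k); either way the
- // scale is folded into Q, so llm_build_kv runs with kq_scale = 1.0f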
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow);
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- Kcur, Vcur, Qcur, KQ_mask_l, n_tokens, kv_head, n_kv, 1.0f, cb, il);
- }
- cur = llm_build_norm(ctx0, cur, hparams,
- model.layers[il].attn_post_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_post_norm", il);
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- }
- struct ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
- cb(sa_out, "sa_out", il);
- cur = llm_build_norm(ctx0, sa_out, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- // feed-forward network
- {
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_GELU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = llm_build_norm(ctx0, cur, hparams,
- model.layers[il].ffn_post_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_post_norm", il);
- cur = ggml_add(ctx0, cur, sa_out);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- // final logit soft-capping
- cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_final_logit_softcapping);
- cur = ggml_tanh(ctx0, cur);
- cur = ggml_scale(ctx0, cur, hparams.f_final_logit_softcapping);
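- // the three ops above implement logit soft-capping: logits = cap * tanh(logits / cap)
- // with cap = f_final_logit_softcapping, which smoothly bounds the logits to (-cap, cap)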
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_starcoder2() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, model.layers[il].attn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- if (model.layers[il].bq) {
- Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- cb(Qcur, "Qcur", il);
- }
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- if (model.layers[il].bk) {
- Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- cb(Kcur, "Kcur", il);
- }
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- if (model.layers[il].bv) {
- Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- cb(Vcur, "Vcur", il);
- }
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- NULL, NULL, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
- cb(cur, "ffn_out", il);
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, model.output_norm_b,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_mamba() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- // {n_embd, n_tokens}
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- struct ggml_tensor * state_copy = build_inp_s_copy();
- struct ggml_tensor * state_mask = build_inp_s_mask();
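- // Mamba is a recurrent state-space model: instead of a growing KV cache it keeps a
- // fixed-size SSM state per sequence; state_copy and state_mask tell llm_build_mamba
- // which cached states to reuse and which to clear for the sequences in this batch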
- for (int il = 0; il < n_layer; ++il) {
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- cur = llm_build_mamba(ctx0, lctx, batch, gf, cur,
- state_copy, state_mask,
- kv_head, n_kv, cb, il);
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- }
- // residual
- cur = ggml_add(ctx0, cur, inpL);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- // final rmsnorm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_command_r() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- const float f_logit_scale = hparams.f_logit_scale;
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM, cb, il);
- cb(cur, "attn_norm", il);
- struct ggml_tensor * ffn_inp = cur;
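- // Command-R uses a parallel block: the attention and the FFN both consume the same
- // attn_norm output (saved here as ffn_inp) and their outputs are added to the layer
- // input further down, rather than being applied one after the other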
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- if (model.layers[il].bq) {
- Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- cb(Qcur, "Qcur", il);
- }
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- if (model.layers[il].bk) {
- Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- cb(Kcur, "Kcur", il);
- }
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- if (model.layers[il].bv) {
- Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- cb(Vcur, "Vcur", il);
- }
- if (model.layers[il].attn_q_norm) {
- Qcur = ggml_view_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens,
- ggml_element_size(Qcur) * n_embd_head,
- ggml_element_size(Qcur) * n_embd_head * n_head,
- 0);
- cb(Qcur, "Qcur", il);
- Kcur = ggml_view_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens,
- ggml_element_size(Kcur) * n_embd_head,
- ggml_element_size(Kcur) * n_embd_head * n_head_kv,
- 0);
- cb(Kcur, "Kcur", il);
- Qcur = llm_build_norm(ctx0, Qcur, hparams,
- model.layers[il].attn_q_norm,
- NULL,
- LLM_NORM, cb, il);
- cb(Qcur, "Qcur", il);
- Kcur = llm_build_norm(ctx0, Kcur, hparams,
- model.layers[il].attn_k_norm,
- NULL,
- LLM_NORM, cb, il);
- cb(Kcur, "Kcur", il);
- }
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
- }
- struct ggml_tensor * attn_out = cur;
- // feed-forward network
- {
- cur = llm_build_ffn(ctx0, lctx, ffn_inp,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- }
- // add together residual + FFN + self-attention
- cur = ggml_add(ctx0, cur, inpL);
- cur = ggml_add(ctx0, cur, attn_out);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- if (f_logit_scale) {
- cur = ggml_scale(ctx0, cur, f_logit_scale);
- }
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- // ref: https://allenai.org/olmo
- // based on the original build_llama() function, changes:
- // * non-parametric layer norm
- // * clamp qkv
- // * removed bias
- // * removed MoE
- struct ggml_cgraph * build_olmo() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- // mutable variable, needed during the last layer of the computation to skip unused tokens
- int32_t n_tokens = this->n_tokens;
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- NULL, NULL,
- LLM_NORM, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- if (hparams.f_clamp_kqv > 0.0f) {
- Qcur = ggml_clamp(ctx0, Qcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
- cb(Qcur, "Qcur", il);
- }
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- if (hparams.f_clamp_kqv > 0.0f) {
- Kcur = ggml_clamp(ctx0, Kcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
- cb(Kcur, "Kcur", il);
- }
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- if (hparams.f_clamp_kqv > 0.0f) {
- Vcur = ggml_clamp(ctx0, Vcur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
- cb(Vcur, "Vcur", il);
- }
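- // when f_clamp_kqv > 0 the Q/K/V activations are clamped to [-f_clamp_kqv, f_clamp_kqv]
- // (OLMo's clip_qkv), which keeps the attention inputs numerically well-behaved;
- // a non-positive value disables the clamping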
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, nullptr,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- n_tokens = n_outputs;
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- NULL, NULL,
- LLM_NORM, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- cur = ggml_add(ctx0, cur, ffn_inp);
- cb(cur, "ffn_out", il);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- NULL, NULL,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- // based on the build_qwen2moe() function, changes:
- // * removed shared experts
- // * removed bias
- // * added q, k norm
- struct ggml_cgraph * build_olmoe() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- // mutable variable, needed during the last layer of the computation to skip unused tokens
- int32_t n_tokens = this->n_tokens;
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- Qcur = llm_build_norm(ctx0, Qcur, hparams, model.layers[il].attn_q_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(Qcur, "Qcur_normed", il);
- Kcur = llm_build_norm(ctx0, Kcur, hparams, model.layers[il].attn_k_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(Kcur, "Kcur_normed", il);
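- // OLMoE applies RMS norm to the Q and K projections ("QK-norm") before RoPE; together
- // with the dropped biases this is the main attention-side change from build_qwen2moe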
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
- Qcur = ggml_rope_ext(
- ctx0, Qcur, inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur_rope", il);
- Kcur = ggml_rope_ext(
- ctx0, Kcur, inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur_rope", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- n_tokens = n_outputs;
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // MoE branch
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_moe_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_gate_inp,
- model.layers[il].ffn_up_exps,
- model.layers[il].ffn_gate_exps,
- model.layers[il].ffn_down_exps,
- n_expert, n_expert_used,
- LLM_FFN_SILU, false,
- false, 0.0,
- cb, il);
- cb(cur, "ffn_moe_out", il);
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_openelm() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- const int64_t n_head = hparams.n_head(il);
- const int64_t n_head_kv = hparams.n_head_kv(il);
- const int64_t n_head_qkv = 2*n_head_kv + n_head;
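- // OpenELM varies the head counts per layer, so n_head/n_head_kv are re-read from
- // hparams for every layer and the fused QKV output is reshaped into
- // n_head + 2*n_head_kv head-sized slots before Q, K and V are sliced out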
- cur = inpL;
- struct ggml_tensor * residual = cur;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
- cur = ggml_reshape_3d(ctx0, cur, n_embd_head_k, n_head_qkv, n_tokens);
- struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, cur->nb[1], cur->nb[2], 0));
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, cur->nb[1], cur->nb[2], cur->nb[1]*n_head));
- cb(Kcur, "Kcur", il);
- struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, cur->nb[1], cur->nb[2], cur->nb[1]*(n_head+n_head_kv)));
- cb(Vcur, "Vcur", il);
- Qcur = llm_build_norm(ctx0, Qcur, hparams,
- model.layers[il].attn_q_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(Qcur, "Qcur", il);
- Kcur = llm_build_norm(ctx0, Kcur, hparams,
- model.layers[il].attn_k_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(Kcur, "Kcur", il);
- Qcur = ggml_rope_ext(
- ctx0, Qcur, inp_pos, NULL, n_rot, rope_type, n_ctx_orig,
- freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, Kcur, inp_pos, NULL, n_rot, rope_type, n_ctx_orig,
- freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- Vcur = ggml_reshape_2d(ctx0, Vcur, n_embd_head * n_head_kv, n_tokens);
- cb(Vcur, "Vcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- residual = ggml_get_rows(ctx0, residual, inp_out_ids);
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- inpL = cur;
- }
- cur = inpL;
- // norm
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_gptneox() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm,
- model.layers[il].attn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
- cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
- cb(cur, "bqkv", il);
- struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
- struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
- struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- }
- // ffn
- if (hparams.use_par_res) {
- // attention and ffn are computed in parallel
- // x = x + attn(ln1(x)) + ffn(ln2(x))
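- // (use_par_res mirrors GPT-NeoX's use_parallel_residual setting: both the attention
- // output and the FFN output are added to the untouched layer input inpL)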
- struct ggml_tensor * attn_out = cur;
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].ffn_norm,
- model.layers[il].ffn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- NULL, NULL, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
- cb(cur, "ffn_out", il);
- cur = ggml_add(ctx0, cur, inpL);
- cb(cur, "ffn_out", il);
- cur = ggml_add(ctx0, cur, attn_out);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- } else {
- // attention and ffn are computed sequentially
- // x = x + attn(ln1(x))
- // x = x + ffn(ln2(x))
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
- cb(ffn_inp, "ffn_inp", il);
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm,
- model.layers[il].ffn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- NULL, NULL, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
- cb(cur, "ffn_out", il);
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- }
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.output_norm,
- model.output_norm_b,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_arctic() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- // mutable variable, needed during the last layer of the computation to skip unused tokens
- int32_t n_tokens = this->n_tokens;
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- n_tokens = n_outputs;
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- struct ggml_tensor * ffn_out = ggml_add(ctx0, cur, ffn_inp);
- cb(ffn_out, "ffn_out", il);
- // MoE
- cur = llm_build_norm(ctx0, inpSA, hparams,
- model.layers[il].ffn_norm_exps, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm_exps", il);
- cur = llm_build_moe_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_gate_inp,
- model.layers[il].ffn_up_exps,
- model.layers[il].ffn_gate_exps,
- model.layers[il].ffn_down_exps,
- n_expert, n_expert_used,
- LLM_FFN_SILU, true,
- false, 0.0,
- cb, il);
- cb(cur, "ffn_moe_out", il);
- cur = ggml_add(ctx0, cur, ffn_out);
- cb(cur, "ffn_out", il);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_deepseek2() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- // mutable variable, needed during the last layer of the computation to skip unused tokens
- int32_t n_tokens = this->n_tokens;
- bool is_lite = (hparams.n_layer == 27);
- // We have to pre-scale kq_scale and attn_factor to make the YaRN RoPE work correctly.
- // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation.
- const float mscale = attn_factor * (1.0f + hparams.rope_yarn_log_mul * logf(1.0f / freq_scale));
- const float kq_scale = 1.0f*mscale*mscale/sqrtf(float(hparams.n_embd_head_k));
- const float attn_factor_scaled = 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale));
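- // note (presumably): ggml's YaRN RoPE applies its own magnitude scaling of
- // (1 + 0.1*log(1/freq_scale)) on top of attn_factor, so attn_factor_scaled cancels that
- // internal factor and the full mscale^2 correction is folded into kq_scale instead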
- const uint32_t n_embd_head_qk_rope = hparams.n_rot;
- const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
- const uint32_t kv_lora_rank = hparams.n_lora_kv;
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- // {n_embd, n_tokens}
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- struct ggml_tensor * q = NULL;
- if (!is_lite) {
- // {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens}
- q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur);
- cb(q, "q", il);
- q = llm_build_norm(ctx0, q, hparams,
- model.layers[il].attn_q_a_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(q, "q", il);
- // {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens}
- q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q);
- cb(q, "q", il);
- } else {
- q = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
- cb(q, "q", il);
- }
- // split into {n_head * n_embd_head_qk_nope, n_tokens}
- struct ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens,
- ggml_row_size(q->type, hparams.n_embd_head_k),
- ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
- 0);
- cb(q_nope, "q_nope", il);
- // and {n_head * n_embd_head_qk_rope, n_tokens}
- struct ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens,
- ggml_row_size(q->type, hparams.n_embd_head_k),
- ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
- ggml_row_size(q->type, n_embd_head_qk_nope));
- cb(q_pe, "q_pe", il);
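- // each head's row in q packs the no-RoPE part followed by the RoPE part
- // (hparams.n_embd_head_k = n_embd_head_qk_nope + n_embd_head_qk_rope), so q_nope and q_pe
- // above are strided views into the same tensor, the latter offset by n_embd_head_qk_nope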
- // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens}
- struct ggml_tensor * kv_pe_compresseed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
- cb(kv_pe_compresseed, "kv_pe_compresseed", il);
- // split into {kv_lora_rank, n_tokens}
- struct ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compresseed, kv_lora_rank, n_tokens,
- kv_pe_compresseed->nb[1],
- 0);
- cb(kv_compressed, "kv_compressed", il);
- // and {n_embd_head_qk_rope, n_tokens}
- struct ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compresseed, n_embd_head_qk_rope, 1, n_tokens,
- kv_pe_compresseed->nb[1],
- kv_pe_compresseed->nb[1],
- ggml_row_size(kv_pe_compresseed->type, kv_lora_rank));
- cb(k_pe, "k_pe", il);
- kv_compressed = ggml_cont(ctx0, kv_compressed); // TODO: the CUDA backend does not support non-contiguous norm
- kv_compressed = llm_build_norm(ctx0, kv_compressed, hparams,
- model.layers[il].attn_kv_a_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(kv_compressed, "kv_compressed", il);
- // {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens}
- struct ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed);
- cb(kv, "kv", il);
- // split into {n_head * n_embd_head_qk_nope, n_tokens}
- struct ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,
- ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v),
- ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)),
- 0);
- cb(k_nope, "k_nope", il);
- // and {n_head * n_embd_head_v, n_tokens}
- struct ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens,
- ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)),
- ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head),
- ggml_row_size(kv->type, (n_embd_head_qk_nope)));
- cb(v_states, "v_states", il);
- v_states = ggml_cont(ctx0, v_states);
- cb(v_states, "v_states", il);
- v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens,
- ggml_row_size(kv->type, hparams.n_embd_head_v * n_head),
- 0);
- cb(v_states, "v_states", il);
- q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend does not support non-contiguous RoPE
- q_pe = ggml_rope_ext(
- ctx0, q_pe, inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor_scaled, beta_fast, beta_slow
- );
- cb(q_pe, "q_pe", il);
- // shared RoPE key
- k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend does not support non-contiguous RoPE
- k_pe = ggml_rope_ext(
- ctx0, k_pe, inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor_scaled, beta_fast, beta_slow
- );
- cb(k_pe, "k_pe", il);
- struct ggml_tensor * q_states = ggml_concat(ctx0, q_nope, q_pe, 0);
- cb(q_states, "q_states", il);
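- // the RoPE'd key component k_pe is computed once (a single head) and repeated across all
- // n_head heads below before being concatenated with the per-head no-RoPE part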
- struct ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0);
- cb(k_states, "k_states", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- k_states, v_states, q_states, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- n_tokens = n_outputs;
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- if ((uint32_t) il < hparams.n_layer_dense_lead) {
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- } else {
- // MoE branch
- ggml_tensor * moe_out =
- llm_build_moe_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_gate_inp,
- model.layers[il].ffn_up_exps,
- model.layers[il].ffn_gate_exps,
- model.layers[il].ffn_down_exps,
- n_expert, n_expert_used,
- LLM_FFN_SILU, false,
- true, hparams.expert_weights_scale,
- cb, il);
- cb(moe_out, "ffn_moe_out", il);
- // FFN shared expert
- {
- ggml_tensor * ffn_shexp = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up_shexp, NULL, NULL,
- model.layers[il].ffn_gate_shexp, NULL, NULL,
- model.layers[il].ffn_down_shexp, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(ffn_shexp, "ffn_shexp", il);
- cur = ggml_add(ctx0, moe_out, ffn_shexp);
- cb(cur, "ffn_out", il);
- }
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = ggml_mul_mat(ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_bitnet() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- if (model.layers[il].wq_scale) {
- Qcur = ggml_mul(ctx0, Qcur, model.layers[il].wq_scale);
- }
- cb(Qcur, "Qcur", il);
- if (model.layers[il].bq) {
- Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- cb(Qcur, "Qcur", il);
- }
- // B1.K
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- if (model.layers[il].wk_scale) {
- Kcur = ggml_mul(ctx0, Kcur, model.layers[il].wk_scale);
- }
- cb(Kcur, "Kcur", il);
- if (model.layers[il].bk) {
- Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- cb(Kcur, "Kcur", il);
- }
- // B1.V
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- if (model.layers[il].wv_scale) {
- Vcur = ggml_mul(ctx0, Vcur, model.layers[il].wv_scale);
- }
- cb(Vcur, "Vcur", il);
- if (model.layers[il].bv) {
- Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- cb(Vcur, "Vcur", il);
- }
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- NULL, NULL,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cur = llm_build_norm(ctx0, cur, hparams,
- model.layers[il].attn_sub_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_sub_norm", il);
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo, cur);
- if (model.layers[il].wo_scale) {
- cur = ggml_mul(ctx0, cur, model.layers[il].wo_scale);
- }
- if (model.layers[il].bo) {
- cur = ggml_add(ctx0, cur, model.layers[il].bo);
- }
- cb(cur, "attn_o_out", il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
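- // the down projection is intentionally not fused into llm_build_ffn here (passed as NULL):
- // ffn_sub_norm is applied between the activation and the down projection, which is then
- // done separately below together with its optional per-tensor scale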
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, model.layers[il].ffn_up_scale,
- model.layers[il].ffn_gate, NULL, model.layers[il].ffn_gate_scale,
- NULL, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_sub_out", il);
- cur = llm_build_norm(ctx0, cur, hparams,
- model.layers[il].ffn_sub_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_sub_norm", il);
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].ffn_down, cur);
- if (model.layers[il].ffn_down_scale) {
- cur = ggml_mul(ctx0, cur, model.layers[il].ffn_down_scale);
- }
- cb(cur, "ffn_down", il);
- cur = ggml_add(ctx0, cur, ffn_inp);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.tok_embd, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_t5_encoder() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- // mutable variable, needed during the last layer of the computation to skip unused tokens
- int32_t n_tokens = this->n_tokens;
- const int64_t n_embd_head = hparams.n_embd_head_v;
- const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- GGML_ASSERT(lctx.is_encoding);
- struct ggml_tensor * pos_bucket_enc = llm_build_pos_bucket(false);
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask_enc = build_inp_KQ_mask(false);
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm_enc, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq_enc, cur);
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk_enc, cur);
- cb(Kcur, "Kcur", il);
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv_enc, cur);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
- struct ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
- struct ggml_tensor * k = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3));
- struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
- cb(kq, "kq", il);
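- // layers without their own relative-attention bias fall back to the tensor of layer 0,
- // which (as in the reference T5 implementation) defines the bias only on the first layer
- // and shares it across the rest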
- struct ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b_enc ? model.layers[il].attn_rel_b_enc : model.layers[0].attn_rel_b_enc;
- struct ggml_tensor * pos_bias = llm_build_pos_bias(pos_bucket_enc, attn_rel_b);
- struct ggml_tensor * kq_b = ggml_add(ctx0, kq, pos_bias);
- cb(kq_b, "kq_b", il);
- kq = ggml_soft_max_ext(ctx0, kq_b, KQ_mask_enc, 1.0f, hparams.f_max_alibi_bias);
- cb(kq, "kq_soft_max_ext", il);
- struct ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_tokens)));
- cb(v, "v", il);
- struct ggml_tensor * kqv = ggml_mul_mat(ctx0, ggml_reshape_3d(ctx0, v, n_tokens, n_embd_head, n_head_kv), kq);
- cb(kqv, "kqv", il);
- struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
- cb(kqv_merged, "kqv_merged", il);
- cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens);
- cb(cur, "kqv_merged_cont", il);
- ggml_build_forward_expand(gf, cur);
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo_enc, cur);
- cb(cur, "kqv_out", il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- n_tokens = n_outputs;
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm_enc, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- // T5 uses relu, flan-T5 uses gelu-gated
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up_enc, NULL, NULL,
- model.layers[il].ffn_gate_enc, NULL, NULL,
- model.layers[il].ffn_down_enc, NULL, NULL,
- NULL,
- model.layers[il].ffn_gate_enc ? LLM_FFN_GELU : LLM_FFN_RELU,
- model.layers[il].ffn_gate_enc ? LLM_FFN_PAR : LLM_FFN_SEQ,
- cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cb(cur, "ffn_out", il);
- ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
- if (layer_dir != nullptr) {
- cur = ggml_add(ctx0, cur, layer_dir);
- }
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cb(cur, "result_embd", -1);
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm_enc, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_t5_decoder() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- // mutable variable, needed during the last layer of the computation to skip unused tokens
- int32_t n_tokens = this->n_tokens;
- const int64_t n_embd_head = hparams.n_embd_head_v;
- const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- GGML_ASSERT(!lctx.is_encoding);
- GGML_ASSERT(n_outputs_enc > 0 && "call llama_encode() first");
- struct ggml_tensor * embd_enc = llm_build_inp_embd_enc();
- struct ggml_tensor * pos_bucket_dec = llm_build_pos_bucket(true);
- struct ggml_tensor * KQ_mask_dec = build_inp_KQ_mask();
- struct ggml_tensor * KQ_mask_cross = llm_build_inp_KQ_mask_cross();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- llm_build_kv_store(ctx0, hparams, cparams, kv_self, gf, Kcur, Vcur, n_tokens, kv_head, cb, il);
- struct ggml_tensor * k =
- ggml_view_3d(ctx0, kv_self.k_l[il],
- n_embd_head_k, n_kv, n_head_kv,
- ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa),
- ggml_row_size(kv_self.k_l[il]->type, n_embd_head_k),
- 0);
- cb(k, "k", il);
- struct ggml_tensor * v =
- ggml_view_3d(ctx0, kv_self.v_l[il],
- n_kv, n_embd_head_v, n_head_kv,
- ggml_element_size(kv_self.v_l[il])*n_ctx,
- ggml_element_size(kv_self.v_l[il])*n_ctx*n_embd_head_v,
- 0);
- cb(v, "v", il);
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- struct ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
- struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
- cb(kq, "kq", il);
- struct ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b ? model.layers[il].attn_rel_b : model.layers[0].attn_rel_b;
- struct ggml_tensor * pos_bias = llm_build_pos_bias(pos_bucket_dec, attn_rel_b);
- struct ggml_tensor * kq_b = ggml_add(ctx0, kq, pos_bias);
- cb(kq_b, "kq_b", il);
- kq = ggml_soft_max_ext(ctx0, kq_b, KQ_mask_dec, 1.0f, hparams.f_max_alibi_bias);
- cb(kq, "kq_soft_max_ext", il);
- struct ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq);
- cb(kqv, "kqv", il);
- struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
- cb(kqv_merged, "kqv_merged", il);
- cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens);
- cb(cur, "kqv_merged_cont", il);
- ggml_build_forward_expand(gf, cur);
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo, cur);
- cb(cur, "kqv_out", il);
- }
- cur = ggml_add(ctx0, cur, inpSA);
- cb(cur, "cross_inp", il);
- struct ggml_tensor * inpCA = cur;
- // norm
- cur = llm_build_norm(ctx0, cur, hparams,
- model.layers[il].attn_norm_cross, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm_cross", il);
- // cross-attention
- {
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq_cross, cur);
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk_cross, embd_enc);
- cb(Kcur, "Kcur", il);
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv_cross, embd_enc);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_outputs_enc);
- struct ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
- struct ggml_tensor * k = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3));
- struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
- cb(kq, "kq", il);
- kq = ggml_soft_max_ext(ctx0, kq, KQ_mask_cross, 1.0f, hparams.f_max_alibi_bias);
- cb(kq, "kq_soft_max_ext", il);
- struct ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_outputs_enc)));
- cb(v, "v", il);
- struct ggml_tensor * kqv = ggml_mul_mat(ctx0, ggml_reshape_3d(ctx0, v, n_outputs_enc, n_embd_head, n_head_kv), kq);
- cb(kqv, "kqv", il);
- struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
- cb(kqv_merged, "kqv_merged", il);
- cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens);
- cb(cur, "kqv_merged_cont", il);
- ggml_build_forward_expand(gf, cur);
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo_cross, cur);
- cb(cur, "kqv_out", il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- n_tokens = n_outputs;
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- inpCA = ggml_get_rows(ctx0, inpCA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpCA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- // T5 uses relu, flan-T5 uses gelu-gated
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- model.layers[il].ffn_gate_enc ? LLM_FFN_GELU : LLM_FFN_RELU,
- model.layers[il].ffn_gate_enc ? LLM_FFN_PAR : LLM_FFN_SEQ,
- cb, il);
- cb(cur, "ffn_out", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cb(cur, "ffn_out", il);
- ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
- if (layer_dir != nullptr) {
- cur = ggml_add(ctx0, cur, layer_dir);
- }
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cb(cur, "result_embd", -1);
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_jais() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm,
- model.layers[il].attn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
- cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
- cb(cur, "bqkv", il);
- struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*cur->nb[0]*(n_embd)));
- struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*cur->nb[0]*(n_embd)));
- struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*cur->nb[0]*(n_embd + n_embd_gqa)));
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
- Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
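- // note: JAIS scales attention by 1/n_embd_head rather than the usual 1/sqrt(n_embd_head)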
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/float(n_embd_head), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
- }
- // add the input
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
- cb(ffn_inp, "ffn_inp", il);
- // FF
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm,
- model.layers[il].ffn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- }
- inpL = ggml_add(ctx0, cur, ffn_inp);
- cb(inpL, "l_out", il);
- }
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.output_norm,
- model.output_norm_b,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_chatglm() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm,
- NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- struct ggml_tensor * Qcur = nullptr;
- struct ggml_tensor * Kcur = nullptr;
- struct ggml_tensor * Vcur = nullptr;
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
- cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
- cb(cur, "bqkv", il);
- Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
- Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
- Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
- cb(Qcur, "Qcur", il);
- cb(Kcur, "Kcur", il);
- cb(Vcur, "Vcur", il);
- //printf("freq_base: %f freq_scale: %f ext_factor: %f attn_factor: %f\n", freq_base, freq_scale, ext_factor, attn_factor);
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur_rope", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur_rope", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, NULL,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- // Add the input
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // FF
- {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm,
- NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- NULL, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SWIGLU, LLM_FFN_SEQ, cb, il);
- cb(cur, "ffn_out", il);
- }
- inpL = ggml_add(ctx0, cur, ffn_inp);
- cb(inpL, "l_out", il);
- }
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.output_norm,
- NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_nemotron() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- //GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm,
- model.layers[il].attn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- if (model.layers[il].bq) {
- Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- cb(Qcur, "Qcur", il);
- }
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- if (model.layers[il].bk) {
- Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- cb(Kcur, "Kcur", il);
- }
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- if (model.layers[il].bv) {
- Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- cb(Vcur, "Vcur", il);
- }
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm,
- model.layers[il].ffn_norm_b,
- LLM_NORM, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- NULL, NULL, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_RELU_SQR, LLM_FFN_SEQ, cb, il);
- cur = ggml_add(ctx0, cur, ffn_inp);
- cb(cur, "ffn_out", il);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, model.output_norm_b,
- LLM_NORM, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- struct ggml_cgraph * build_exaone() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- // mutable variable, needed during the last layer of the computation to skip unused tokens
- int32_t n_tokens = this->n_tokens;
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // rope freq factors for llama3; may return nullptr for llama2 and other models
- struct ggml_tensor * rope_factors = build_rope_factors(il);
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- if (model.layers[il].bq) {
- Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- cb(Qcur, "Qcur", il);
- }
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- if (model.layers[il].bk) {
- Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- cb(Kcur, "Kcur", il);
- }
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- if (model.layers[il].bv) {
- Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- cb(Vcur, "Vcur", il);
- }
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- n_tokens = n_outputs;
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- cur = ggml_add(ctx0, cur, ffn_inp);
- cb(cur, "ffn_out", il);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- ggml_cgraph * build_rwkv6() {
- ggml_cgraph *gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- // Token shift state dimensions should be 2 * n_embd
- GGML_ASSERT(n_embd == hparams.n_embd_k_s() / 2);
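- // the per-layer token-shift state holds two n_embd-sized slices per sequence:
- // one for the time-mix (attention) branch and one for the channel-mix (FFN) branch,
- // split below into att_shift and ffn_shift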
- const int64_t n_seqs = batch.n_seqs;
- const int64_t n_seq_tokens = batch.n_seq_tokens;
- const int64_t n_tokens = batch.n_tokens;
- GGML_ASSERT(n_seqs != 0);
- GGML_ASSERT(batch.equal_seqs);
- GGML_ASSERT(n_tokens == n_seq_tokens * n_seqs);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- struct ggml_tensor * state_copy = build_inp_s_copy();
- struct ggml_tensor * state_mask = build_inp_s_mask();
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- inpL = llm_build_norm(ctx0, inpL, hparams, model.tok_norm, model.tok_norm_b, LLM_NORM, cb, -1);
- for (int il = 0; il < n_layer; ++il) {
- const llama_layer * layer = &model.layers[il];
- // (ab)using the KV cache to store the states
- struct ggml_tensor * token_shift = llm_build_copy_mask_state(ctx0,
- gf, kv_self.k_l[il], state_copy, state_mask,
- hparams.n_embd_k_s(), kv_self.size, kv_head, n_kv, n_seqs);
- struct ggml_tensor * wkv_states = llm_build_copy_mask_state(ctx0,
- gf, kv_self.v_l[il], state_copy, state_mask,
- hparams.n_embd_v_s(), kv_self.size, kv_head, n_kv, n_seqs);
- cur = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);
- token_shift = ggml_reshape_3d(ctx0, token_shift, n_embd, 2, n_seqs);
- struct ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0);
- struct ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift));
- struct ggml_tensor * x_norm_att = llm_build_norm(ctx0, cur, hparams, layer->attn_norm, layer->attn_norm_b, LLM_NORM, cb, il);
- struct ggml_tensor * x_prev = ggml_concat(
- ctx0,
- att_shift,
- ggml_view_3d(ctx0, x_norm_att, n_embd, n_seq_tokens - 1, n_seqs, x_norm_att->nb[1], x_norm_att->nb[2], 0),
- 1
- );
- cur = ggml_add(ctx0, cur, llm_build_rwkv6_time_mix(lctx, ctx0, layer, x_norm_att, x_prev, &wkv_states));
- ggml_build_forward_expand(gf, cur);
- ggml_build_forward_expand(
- gf,
- ggml_cpy(
- ctx0,
- wkv_states,
- ggml_view_1d(
- ctx0,
- kv_self.v_l[il],
- hparams.n_embd_v_s() * n_seqs,
- hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self.v_l[il])
- )
- )
- );
- struct ggml_tensor * x_norm_ffn = llm_build_norm(ctx0, cur, hparams, layer->attn_norm_2, layer->attn_norm_2_b, LLM_NORM, cb, il);
- x_prev = ggml_concat(
- ctx0,
- ffn_shift,
- ggml_view_3d(ctx0, x_norm_ffn, n_embd, n_seq_tokens - 1, n_seqs, x_norm_ffn->nb[1], x_norm_ffn->nb[2], 0),
- 1
- );
- cur = ggml_add(ctx0, cur, llm_build_rwkv6_channel_mix(lctx, ctx0, layer, x_norm_ffn, x_prev));
- ggml_build_forward_expand(gf, cur);
- struct ggml_tensor * last_norm_att = ggml_view_3d(ctx0, x_norm_att, n_embd, 1, n_seqs, x_norm_att->nb[1], x_norm_att->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(x_norm_att));
- struct ggml_tensor * last_norm_ffn = ggml_view_3d(ctx0, x_norm_ffn, n_embd, 1, n_seqs, x_norm_ffn->nb[1], x_norm_ffn->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(x_norm_ffn));
- token_shift = ggml_concat(ctx0, last_norm_att, last_norm_ffn, 1);
- ggml_build_forward_expand(
- gf,
- ggml_cpy(
- ctx0,
- ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * 2, 0),
- ggml_view_1d(ctx0, kv_self.k_l[il], hparams.n_embd_k_s() * n_seqs, hparams.n_embd_k_s() * kv_head * ggml_element_size(kv_self.k_l[il]))
- )
- );
- if (hparams.rescale_every_n_layers != 0 && (il + 1) % hparams.rescale_every_n_layers == 0) {
- cur = ggml_scale(ctx0, cur, 0.5F);
- }
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens);
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- cur = llm_build_norm(ctx0, cur, hparams, model.output_norm, model.output_norm_b, LLM_NORM, cb, -1);
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- // ref: https://github.com/facebookresearch/chameleon
- // based on the original build_llama() function, changes:
- // * qk-norm
- // * swin-norm
- // * removed bias
- // * removed MoE
- struct ggml_cgraph * build_chameleon() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- // mutable variable, needed during the last layer of the computation to skip unused tokens
- int32_t n_tokens = this->n_tokens;
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- // norm
- if (hparams.swin_norm) {
- cur = inpL;
- } else {
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- }
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- if (model.layers[il].attn_q_norm) {
- Qcur = ggml_view_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens,
- ggml_element_size(Qcur) * n_embd_head,
- ggml_element_size(Qcur) * n_embd_head * n_head,
- 0);
- cb(Qcur, "Qcur", il);
- Qcur = llm_build_norm(ctx0, Qcur, hparams,
- model.layers[il].attn_q_norm,
- model.layers[il].attn_q_norm_b,
- LLM_NORM, cb, il);
- cb(Qcur, "Qcur", il);
- }
- if (model.layers[il].attn_k_norm) {
- Kcur = ggml_view_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens,
- ggml_element_size(Kcur) * n_embd_head,
- ggml_element_size(Kcur) * n_embd_head * n_head_kv,
- 0);
- cb(Kcur, "Kcur", il);
- Kcur = llm_build_norm(ctx0, Kcur, hparams,
- model.layers[il].attn_k_norm,
- model.layers[il].attn_k_norm_b,
- LLM_NORM, cb, il);
- cb(Kcur, "Kcur", il);
- }
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, nullptr,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- if (hparams.swin_norm) {
- cur = llm_build_norm(ctx0, cur, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- }
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- n_tokens = n_outputs;
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- if (!hparams.swin_norm) {
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- }
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, NULL, NULL,
- model.layers[il].ffn_gate, NULL, NULL,
- model.layers[il].ffn_down, NULL, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- if (hparams.swin_norm) {
- cur = llm_build_norm(ctx0, cur, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- }
- cur = ggml_add(ctx0, cur, ffn_inp);
- cb(cur, "ffn_out", il);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output_with_img_logits", -1);
- // TODO: this suppresses the output of image tokens, which is required to enable text-only outputs.
- // Needs to be removed once image outputs are supported.
- int img_token_end_idx = 8196;
- int img_token_start_idx = 4;
- int num_img_tokens = img_token_end_idx - img_token_start_idx;
- // create a 1d tensor of size num_img_tokens filled with -FLT_MAX,
- // which ensures that text-token logits are always larger than image-token logits
- struct ggml_tensor * img_logits = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, num_img_tokens);
- img_logits = ggml_clamp(ctx0, img_logits, -FLT_MAX, -FLT_MAX);
- cb(img_logits, "img_logits", -1);
- cur = ggml_set_1d(ctx0, cur, img_logits, ggml_element_size(cur) * img_token_start_idx);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- ggml_cgraph * build_solar() {
- struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
- // mutable variable, needed during the last layer of the computation to skip unused tokens
- int32_t n_tokens = this->n_tokens;
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
- GGML_ASSERT(n_embd_head == hparams.n_rot);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- // inp_pos - contains the positions
- struct ggml_tensor * inp_pos = build_inp_pos();
- // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- struct ggml_tensor * bskcn_1;
- struct ggml_tensor * bskcn_2;
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
- if (hparams.n_bskcn(0, il)) {
- bskcn_1 = inpSA;
- }
- if (hparams.n_bskcn(1, il)) {
- bskcn_2 = inpSA;
- }
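- // the two blends below mix the saved skip activations (bskcn_1 / bskcn_2) with the current
- // residual stream using the learned weights in bskcn_tv: element 0 scales the saved skip,
- // element 1 scales the current residual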
- if (hparams.n_bskcn(2, il)) {
- inpSA = ggml_add(
- ctx0,
- ggml_mul(ctx0, bskcn_1, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)),
- ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv))));
- }
- if (hparams.n_bskcn(3, il)) {
- inpSA = ggml_add(
- ctx0,
- ggml_mul(ctx0, bskcn_2, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)),
- ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv))));
- }
- // norm
- cur = llm_build_norm(ctx0, inpL, hparams,
- model.layers[il].attn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "attn_norm", il);
- // self-attention
- {
- // rope freq factors for llama3; may return nullptr for llama2 and other models
- struct ggml_tensor * rope_factors = build_rope_factors(il);
- // compute Q and K and RoPE them
- struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
- cb(Qcur, "Qcur", il);
- if (model.layers[il].bq) {
- Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
- cb(Qcur, "Qcur", il);
- }
- struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
- cb(Kcur, "Kcur", il);
- if (model.layers[il].bk) {
- Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
- cb(Kcur, "Kcur", il);
- }
- struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
- cb(Vcur, "Vcur", il);
- if (model.layers[il].bv) {
- Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
- cb(Vcur, "Vcur", il);
- }
- Qcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Qcur, "Qcur", il);
- Kcur = ggml_rope_ext(
- ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors,
- n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow
- );
- cb(Kcur, "Kcur", il);
- cur = llm_build_kv(ctx0, lctx, kv_self, gf,
- model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- }
- if (il == n_layer - 1) {
- // skip computing output for unused tokens
- struct ggml_tensor * inp_out_ids = build_inp_out_ids();
- n_tokens = n_outputs;
- cur = ggml_get_rows(ctx0, cur, inp_out_ids);
- inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
- }
- struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
- cb(ffn_inp, "ffn_inp", il);
- // feed-forward network
- cur = llm_build_norm(ctx0, ffn_inp, hparams,
- model.layers[il].ffn_norm, NULL,
- LLM_NORM_RMS, cb, il);
- cb(cur, "ffn_norm", il);
- cur = llm_build_ffn(ctx0, lctx, cur,
- model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
- model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
- model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
- NULL,
- LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
- cb(cur, "ffn_out", il);
- cur = ggml_add(ctx0, cur, ffn_inp);
- cb(cur, "ffn_out", il);
- cur = lctx.cvec.apply_to(ctx0, cur, il);
- cb(cur, "l_out", il);
- // input for next layer
- inpL = cur;
- }
- cur = inpL;
- cur = llm_build_norm(ctx0, cur, hparams,
- model.output_norm, NULL,
- LLM_NORM_RMS, cb, -1);
- cb(cur, "result_norm", -1);
- // lm_head
- cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
- cb(cur, "result_output", -1);
- ggml_build_forward_expand(gf, cur);
- return gf;
- }
- };
- static struct ggml_cgraph * llama_build_graph_defrag(llama_context & lctx, const std::vector<uint32_t> & ids) {
- llama_ubatch dummy = {};
- dummy.equal_seqs = true;
- llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { };
- struct llm_build_context llm(lctx, dummy, cb, false);
- llm.init();
- struct ggml_cgraph * result = llm.build_defrag(ids);
- llm.free();
- return result;
- }
- static struct ggml_cgraph * llama_build_graph_k_shift(llama_context & lctx) {
- llama_ubatch dummy = {};
- dummy.equal_seqs = true;
- llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { };
- struct llm_build_context llm(lctx, dummy, cb, false);
- llm.init();
- struct ggml_cgraph * result = llm.build_k_shift();
- llm.free();
- return result;
- }
- static struct ggml_cgraph * llama_build_graph(
- llama_context & lctx,
- const llama_ubatch & batch,
- bool worst_case) {
- const auto & model = lctx.model;
- // this callback allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.)
- llm_build_cb cb = [&](struct ggml_tensor * cur, const char * name, int il) {
- if (il >= 0) {
- ggml_format_name(cur, "%s-%d", name, il);
- } else {
- ggml_set_name(cur, name);
- }
- if (!lctx.cparams.offload_kqv) {
- if (strcmp(name, "kqv_merged_cont") == 0) {
- // all nodes between the KV store and the attention output are run on the CPU
- ggml_backend_sched_set_tensor_backend(lctx.sched, cur, lctx.backend_cpu);
- }
- }
- // norm may be automatically assigned to the backend of the previous layer, increasing data transfer between backends
- // FIXME: fix in ggml_backend_sched
- const bool full_offload = lctx.model.n_gpu_layers > (int)lctx.model.hparams.n_layer;
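- // for small batches (or when everything is offloaded), force "norm" tensors onto a backend
- // that supports the layer's buffer type and can run the op, overriding the scheduler's
- // default assignment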
- if (batch.n_tokens < 32 || full_offload) {
- if (il != -1 && strcmp(name, "norm") == 0) {
- for (auto * backend : lctx.backends) {
- if (ggml_backend_supports_buft(backend, lctx.model.buft_layer[il].buft) &&
- (ggml_backend_supports_op(backend, cur) || ggml_backend_offload_op(backend, cur))) {
- ggml_backend_sched_set_tensor_backend(lctx.sched, cur, backend);
- break;
- }
- }
- }
- }
- };
- struct ggml_cgraph * result = NULL;
- struct llm_build_context llm(lctx, batch, cb, worst_case);
- llm.init();
- switch (model.arch) {
- case LLM_ARCH_LLAMA:
- case LLM_ARCH_GRANITE:
- case LLM_ARCH_GRANITE_MOE:
- {
- result = llm.build_llama();
- } break;
- case LLM_ARCH_MLLAMA:
- {
- result = llm.build_mllama();
- } break;
- case LLM_ARCH_BAICHUAN:
- {
- result = llm.build_baichuan();
- } break;
- case LLM_ARCH_FALCON:
- {
- result = llm.build_falcon();
- } break;
- case LLM_ARCH_GROK:
- {
- result = llm.build_grok();
- } break;
- case LLM_ARCH_STARCODER:
- {
- result = llm.build_starcoder();
- } break;
- case LLM_ARCH_REFACT:
- {
- result = llm.build_refact();
- } break;
- case LLM_ARCH_BERT:
- case LLM_ARCH_JINA_BERT_V2:
- case LLM_ARCH_NOMIC_BERT:
- {
- result = llm.build_bert();
- } break;
- case LLM_ARCH_BLOOM:
- {
- result = llm.build_bloom();
- } break;
- case LLM_ARCH_MPT:
- {
- result = llm.build_mpt();
- } break;
- case LLM_ARCH_STABLELM:
- {
- result = llm.build_stablelm();
- } break;
- case LLM_ARCH_QWEN:
- {
- result = llm.build_qwen();
- } break;
- case LLM_ARCH_QWEN2:
- {
- result = llm.build_qwen2();
- } break;
- case LLM_ARCH_QWEN2MOE:
- {
- result = llm.build_qwen2moe();
- } break;
- case LLM_ARCH_PHI2:
- {
- result = llm.build_phi2();
- } break;
- case LLM_ARCH_PHI3:
- {
- result = llm.build_phi3();
- } break;
- case LLM_ARCH_PLAMO:
- {
- result = llm.build_plamo();
- } break;
- case LLM_ARCH_GPT2:
- {
- result = llm.build_gpt2();
- } break;
- case LLM_ARCH_CODESHELL:
- {
- result = llm.build_codeshell();
- } break;
- case LLM_ARCH_ORION:
- {
- result = llm.build_orion();
- } break;
- case LLM_ARCH_INTERNLM2:
- {
- result = llm.build_internlm2();
- } break;
- case LLM_ARCH_MINICPM:
- {
- result = llm.build_minicpm();
- } break;
- case LLM_ARCH_MINICPM3:
- {
- result = llm.build_minicpm3();
- } break;
- case LLM_ARCH_GEMMA:
- {
- result = llm.build_gemma();
- } break;
- case LLM_ARCH_GEMMA2:
- {
- result = llm.build_gemma2();
- } break;
- case LLM_ARCH_STARCODER2:
- {
- result = llm.build_starcoder2();
- } break;
- case LLM_ARCH_MAMBA:
- {
- result = llm.build_mamba();
- } break;
- case LLM_ARCH_XVERSE:
- {
- result = llm.build_xverse();
- } break;
- case LLM_ARCH_COMMAND_R:
- {
- result = llm.build_command_r();
- } break;
- case LLM_ARCH_DBRX:
- {
- result = llm.build_dbrx();
- } break;
- case LLM_ARCH_OLMO:
- {
- result = llm.build_olmo();
- } break;
- case LLM_ARCH_OLMOE:
- {
- result = llm.build_olmoe();
- } break;
- case LLM_ARCH_OPENELM:
- {
- result = llm.build_openelm();
- } break;
- case LLM_ARCH_GPTNEOX:
- {
- result = llm.build_gptneox();
- } break;
- case LLM_ARCH_ARCTIC:
- {
- result = llm.build_arctic();
- } break;
- case LLM_ARCH_DEEPSEEK2:
- {
- result = llm.build_deepseek2();
- } break;
- case LLM_ARCH_CHATGLM:
- {
- result = llm.build_chatglm();
- } break;
- case LLM_ARCH_BITNET:
- {
- result = llm.build_bitnet();
- } break;
- case LLM_ARCH_T5:
- {
- if (lctx.is_encoding) {
- result = llm.build_t5_encoder();
- } else {
- result = llm.build_t5_decoder();
- }
- } break;
- case LLM_ARCH_T5ENCODER:
- {
- result = llm.build_t5_encoder();
- } break;
- case LLM_ARCH_JAIS:
- {
- result = llm.build_jais();
- } break;
- case LLM_ARCH_NEMOTRON:
- {
- result = llm.build_nemotron();
- } break;
- case LLM_ARCH_EXAONE:
- {
- result = llm.build_exaone();
- } break;
- case LLM_ARCH_RWKV6:
- {
- result = llm.build_rwkv6();
- } break;
- case LLM_ARCH_CHAMELEON:
- {
- result = llm.build_chameleon();
- } break;
- case LLM_ARCH_SOLAR:
- {
- result = llm.build_solar();
- } break;
- default:
- GGML_ABORT("fatal error");
- }
- // add on pooling layer
- if (lctx.cparams.embeddings) {
- result = llm.append_pooling(result);
- }
- llm.free();
- return result;
- }
- static void llama_set_k_shift(llama_context & lctx) {
- const int64_t kv_size = lctx.kv_self.size;
- assert(ggml_backend_buffer_is_host(lctx.inp_K_shift->buffer));
- int32_t * data = (int32_t *) lctx.inp_K_shift->data;
- for (int i = 0; i < kv_size; ++i) {
- data[i] = lctx.kv_self.cells[i].delta;
- }
- }
- static void llama_set_s_copy(llama_context & lctx) {
- const int64_t kv_size = lctx.kv_self.size;
- assert(ggml_backend_buffer_is_host(lctx.inp_s_copy->buffer));
- int32_t * data = (int32_t *) lctx.inp_s_copy->data;
- for (int i = 0; i < kv_size; ++i) {
- data[i] = lctx.kv_self.cells[i].src;
- }
- }
- static int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) {
- // TODO move to hparams if a T5 variant appears that uses a different value
- const int64_t max_distance = 128;
- if (bidirectional) {
- n_buckets >>= 1;
- }
- const int64_t max_exact = n_buckets >> 1;
- int32_t relative_position = x - y;
- int32_t relative_bucket = 0;
- if (bidirectional) {
- relative_bucket += (relative_position > 0) * n_buckets;
- relative_position = abs(relative_position);
- } else {
- relative_position = -std::min<int32_t>(relative_position, 0);
- }
- int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact));
- relative_position_if_large = std::min<int32_t>(relative_position_if_large, n_buckets - 1);
- relative_bucket += (relative_position < max_exact ? relative_position : relative_position_if_large);
- return relative_bucket;
- }
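- // Illustrative example (assuming n_buckets = 32 and the causal case, bidirectional == false):
- // max_exact = 16, so a key 10 positions behind the query falls in bucket 10 (exact range), while a key
- // 17 positions behind maps to floor(16 + log(17/16) * 16 / log(128/16)) = 16; distances beyond
- // max_exact share the logarithmically spaced upper buckets.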
- static void llama_set_inputs(llama_context & lctx, const llama_ubatch & batch) {
- //
- // set input data
- //
- const auto & hparams = lctx.model.hparams;
- const auto & cparams = lctx.cparams;
- const auto & kv_self = lctx.kv_self;
- if (batch.token) {
- const int64_t n_tokens = batch.n_tokens;
- ggml_backend_tensor_set(lctx.inp_tokens, batch.token, 0, n_tokens*ggml_element_size(lctx.inp_tokens));
- }
- if (batch.embd) {
- if (lctx.inp_cross_attn_state && lctx.inp_cross_attn_state->buffer) {
- ggml_backend_tensor_set(lctx.inp_cross_attn_state, batch.embd, 0, ggml_nbytes(lctx.inp_cross_attn_state));
- // zero out inp_embd since it's not used
- float * inp_embd_data = (float *)lctx.inp_embd->data;
- for (int i = 0; i < ggml_nelements(lctx.inp_embd); ++i) {
- inp_embd_data[i] = 0.0f;
- }
- } else {
- const int64_t n_embd = hparams.n_embd;
- const int64_t n_tokens = batch.n_tokens;
- ggml_backend_tensor_set(lctx.inp_embd, batch.embd, 0, n_tokens*n_embd*ggml_element_size(lctx.inp_embd));
- }
- }
- if (batch.pos && lctx.inp_pos) {
- const int64_t n_tokens = batch.n_tokens;
- ggml_backend_tensor_set(lctx.inp_pos, batch.pos, 0, n_tokens*ggml_element_size(lctx.inp_pos));
- }
- if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) {
- GGML_ASSERT(lctx.inp_out_ids && "every model that can must skip unused outputs");
- const int64_t n_tokens = batch.n_tokens;
- GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_out_ids->buffer));
- int32_t * data = (int32_t *) lctx.inp_out_ids->data;
- if (lctx.n_outputs == n_tokens) {
- for (int i = 0; i < n_tokens; ++i) {
- data[i] = i;
- }
- } else if (batch.output) {
- int32_t n_outputs = 0;
- for (int i = 0; i < n_tokens; ++i) {
- if (batch.output[i]) {
- data[n_outputs++] = i;
- }
- }
- // the graph needs to have been passed the correct number of outputs
- GGML_ASSERT(lctx.n_outputs == n_outputs);
- } else if (lctx.n_outputs == 1) {
- // only keep last output
- data[0] = n_tokens - 1;
- } else {
- GGML_ASSERT(lctx.n_outputs == 0);
- }
- }
- GGML_ASSERT(
- // (!a || b) is a logical implication (a -> b)
- // !hparams.causal_attn -> !cparams.causal_attn
- (hparams.causal_attn || !cparams.causal_attn) &&
- "causal attention is not supported by this model"
- );
- if (lctx.inp_KQ_mask || lctx.inp_KQ_mask_swa) {
- // NOTE: hparams.causal_attn indicates the model is capable of generation and uses the kv cache.
- if (cparams.causal_attn && !lctx.is_encoding) {
- const int64_t n_kv = kv_self.n;
- const int64_t n_tokens = batch.n_tokens;
- const int64_t n_seq_tokens = batch.n_seq_tokens;
- const int64_t n_seqs = batch.n_seqs;
- float * data = nullptr;
- float * data_swa = nullptr;
- if (lctx.inp_KQ_mask) {
- GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
- data = (float *) lctx.inp_KQ_mask->data;
- }
- if (lctx.inp_KQ_mask_swa) {
- GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask_swa->buffer));
- data_swa = (float *) lctx.inp_KQ_mask_swa->data;
- }
- // For causal attention, use only the previous KV cells
- // of the correct sequence for each token of the batch.
- // It's assumed that if a token in the batch has multiple sequences, they are equivalent.
- for (int h = 0; h < 1; ++h) {
- for (int s = 0; s < n_seqs; ++s) {
- const llama_seq_id seq_id = batch.seq_id[s][0];
- for (int j = 0; j < n_seq_tokens; ++j) {
- const llama_pos pos = batch.pos[s*n_seq_tokens + j];
- for (int i = 0; i < n_kv; ++i) {
- float f;
- if (!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) {
- f = -INFINITY;
- } else {
- if (hparams.use_alibi) {
- f = -std::abs(kv_self.cells[i].pos - pos);
- } else {
- f = 0.0f;
- }
- }
- if (data) {
- data[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f;
- }
- // may need to cut off old tokens for sliding window
- if (data_swa) {
- if (pos - kv_self.cells[i].pos >= (int32_t)hparams.n_swa) {
- f = -INFINITY;
- }
- data_swa[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f;
- }
- }
- }
- }
- if (data) {
- for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
- for (int j = 0; j < n_kv; ++j) {
- data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
- }
- }
- }
- if (data_swa) {
- for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
- for (int j = 0; j < n_kv; ++j) {
- data_swa[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
- }
- }
- }
- }
- } else {
- const int64_t n_tokens = batch.n_tokens;
- const int64_t n_seq_tokens = batch.n_seq_tokens;
- const int64_t n_seqs = batch.n_seqs;
- // when using kv cache, the mask needs to match the kv cache size
- const int64_t n_stride = hparams.causal_attn && !lctx.is_encoding ? kv_self.n : n_tokens;
- GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
- float * data = (float *) lctx.inp_KQ_mask->data;
- for (int h = 0; h < 1; ++h) {
- for (int s1 = 0; s1 < n_seqs; ++s1) {
- const llama_seq_id seq_id = batch.seq_id[s1][0];
- for (int j = 0; j < n_seq_tokens; ++j) {
- const int32_t tj = s1*n_seq_tokens + j;
- for (int s0 = 0; s0 < n_seqs; ++s0) {
- for (int i = 0; i < n_seq_tokens; ++i) {
- const int32_t ti = s0*n_seq_tokens + i;
- float f = -INFINITY;
- for (int s = 0; s < batch.n_seq_id[s0]; ++s) {
- if (batch.seq_id[s0][s] == seq_id) {
- if (hparams.use_alibi) {
- f = -std::abs(batch.pos[ti] - batch.pos[tj]);
- } else {
- f = 0.0f;
- }
- break;
- }
- }
- data[h*(n_tokens*n_tokens) + tj*n_stride + ti] = f;
- }
- }
- for (int i = n_tokens; i < n_stride; ++i) {
- data[h*(n_tokens*n_tokens) + tj*n_stride + i] = -INFINITY;
- }
- }
- }
- }
- }
- }
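- // Illustrative example of the causal mask layout: for a single sequence with 3 tokens at positions 5, 6
- // and 7 and KV cells holding positions 0..7, row j keeps f (0.0f, or -|delta| with ALiBi) for every cell
- // of the same sequence with pos <= pos_j and writes -INFINITY elsewhere, so a token never attends to
- // cells ahead of its own position.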
- if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
- const int64_t n_tokens = batch.n_tokens;
- const int64_t n_seq_tokens = batch.n_seq_tokens;
- const int64_t n_seqs = batch.n_seqs;
- GGML_ASSERT(lctx.inp_mean);
- GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer));
- float * data = (float *) lctx.inp_mean->data;
- memset(lctx.inp_mean->data, 0, n_tokens * n_tokens * ggml_element_size(lctx.inp_mean));
- std::vector<uint64_t> sum(n_tokens, 0);
- for (int s = 0; s < n_seqs; ++s) {
- const llama_seq_id seq_id = batch.seq_id[s][0];
- // TODO: adapt limits to n_seqs when batch.equal_seqs is true
- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN");
- sum[seq_id] += batch.n_seq_tokens;
- }
- std::vector<float> div(n_tokens, 0.0f);
- for (int i = 0; i < n_tokens; ++i) {
- const uint64_t s = sum[i];
- if (s > 0) {
- div[i] = 1.0f/float(s);
- }
- }
- for (int s = 0; s < n_seqs; ++s) {
- const llama_seq_id seq_id = batch.seq_id[s][0];
- for (int i = 0; i < n_seq_tokens; ++i) {
- data[seq_id*n_tokens + s*n_seq_tokens + i] = div[seq_id];
- }
- }
- }
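- // Illustrative example: with two sequences of 2 tokens each (n_tokens = 4), inp_mean is a 4x4 matrix
- // whose row 0 holds 0.5 in columns 0-1 and row 1 holds 0.5 in columns 2-3, so multiplying it with the
- // token embeddings produces each sequence mean in the row indexed by its seq_id.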
- if (cparams.embeddings && (
- cparams.pooling_type == LLAMA_POOLING_TYPE_CLS ||
- cparams.pooling_type == LLAMA_POOLING_TYPE_RANK)) {
- const int64_t n_tokens = batch.n_tokens;
- const int64_t n_seq_tokens = batch.n_seq_tokens;
- const int64_t n_seqs = batch.n_seqs;
- GGML_ASSERT(lctx.inp_cls);
- GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer));
- uint32_t * data = (uint32_t *) lctx.inp_cls->data;
- memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls));
- for (int s = 0; s < n_seqs; ++s) {
- const llama_seq_id seq_id = batch.seq_id[s][0];
- // TODO: adapt limits to n_seqs when batch.equal_seqs is true
- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS or RANK");
- for (int i = 0; i < n_seq_tokens; ++i) {
- const llama_pos pos = batch.pos[s*n_seq_tokens + i];
- if (pos == 0) {
- data[seq_id] = s*n_seq_tokens + i;
- }
- }
- }
- }
- if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) {
- const int64_t n_tokens = batch.n_tokens;
- const int64_t n_seq_tokens = batch.n_seq_tokens;
- const int64_t n_seqs = batch.n_seqs;
- GGML_ASSERT(lctx.inp_cls);
- GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer));
- uint32_t * data = (uint32_t *) lctx.inp_cls->data;
- memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls));
- std::vector<int> last_pos(n_tokens, -1);
- std::vector<int> last_row(n_tokens, -1);
- for (int s = 0; s < n_seqs; ++s) {
- const llama_seq_id seq_id = batch.seq_id[s][0];
- // TODO: adapt limits to n_seqs when batch.equal_seqs is true
- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == LAST");
- for (int i = 0; i < n_seq_tokens; ++i) {
- const llama_pos pos = batch.pos[s*n_seq_tokens + i];
- if (pos >= last_pos[seq_id]) {
- last_pos[seq_id] = pos;
- last_row[seq_id] = s*n_seq_tokens + i;
- }
- }
- }
- for (int i = 0; i < n_tokens; ++i) {
- if (last_row[i] >= 0) {
- data[i] = last_row[i];
- }
- }
- }
- if (kv_self.recurrent) {
- const int64_t n_kv = kv_self.n;
- if (lctx.inp_s_mask) {
- GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_mask->buffer));
- float * data = (float *) lctx.inp_s_mask->data;
- // clear unused states
- for (int i = 0; i < n_kv; ++i) {
- const uint32_t cell_id = i + kv_self.head;
- llama_kv_cell & kv_cell = lctx.kv_self.cells[cell_id];
- data[i] = (float) (kv_cell.src >= 0);
- // only clear once
- if (kv_cell.src < 0) {
- kv_cell.src = cell_id;
- }
- }
- }
- if (lctx.inp_s_copy) {
- GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_copy->buffer));
- int32_t * data = (int32_t *) lctx.inp_s_copy->data;
- // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
- for (uint32_t i = 0; i < n_kv; ++i) {
- const uint32_t cell_id = i + kv_self.head;
- llama_kv_cell & kv_cell = lctx.kv_self.cells[cell_id];
- // prevent out-of-bound sources
- if (kv_cell.src < 0 || (uint32_t) kv_cell.src >= kv_self.size) {
- kv_cell.src = cell_id;
- }
- data[i] = kv_cell.src;
- // ensure copy only happens once
- if (kv_cell.src != (int32_t) cell_id) {
- kv_cell.src = cell_id;
- }
- }
- }
- }
- if (lctx.inp_pos_bucket) {
- const int64_t n_tokens = batch.n_tokens;
- GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_pos_bucket->buffer));
- GGML_ASSERT(!batch.equal_seqs); // TODO: use batch.n_seqs instead of failing
- int32_t * data = (int32_t *) lctx.inp_pos_bucket->data;
- if (!lctx.is_encoding) {
- const int64_t n_kv = kv_self.n;
- for (int h = 0; h < 1; ++h) {
- for (int j = 0; j < n_tokens; ++j) {
- for (int i = 0; i < n_kv; ++i) {
- data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(lctx.kv_self.cells[i].pos, batch.pos[j], hparams.n_rel_attn_bkts, lctx.is_encoding);
- }
- }
- }
- } else {
- for (int h = 0; h < 1; ++h) {
- for (int j = 0; j < n_tokens; ++j) {
- for (int i = 0; i < n_tokens; ++i) {
- data[h*(n_tokens*n_tokens) + j*n_tokens + i] = llama_relative_position_bucket(batch.pos[i], batch.pos[j], hparams.n_rel_attn_bkts, lctx.is_encoding);
- }
- }
- }
- }
- }
- if (!lctx.is_encoding && lctx.inp_embd_enc) {
- assert(lctx.inp_embd_enc->type == GGML_TYPE_F32);
- assert((size_t) ggml_nelements(lctx.inp_embd_enc) == lctx.embd_enc.size());
- ggml_backend_tensor_set(lctx.inp_embd_enc, lctx.embd_enc.data(), 0, ggml_nbytes(lctx.inp_embd_enc));
- }
- if (!lctx.is_encoding && lctx.inp_KQ_mask_cross) {
- const int64_t n_output_enc = lctx.embd_enc.size() / hparams.n_embd;
- const int64_t n_tokens = batch.n_tokens;
- GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask_cross->buffer));
- GGML_ASSERT(!batch.equal_seqs); // TODO: use batch.n_seqs instead of failing
- float * data = (float *) lctx.inp_KQ_mask_cross->data;
- for (int h = 0; h < 1; ++h) {
- for (int j = 0; j < n_tokens; ++j) {
- for (int i = 0; i < n_output_enc; ++i) {
- float f = -INFINITY;
- for (int s = 0; s < batch.n_seq_id[j]; ++s) {
- const llama_seq_id seq_id = batch.seq_id[j][s];
- if (lctx.seq_ids_enc[i].find(seq_id) != lctx.seq_ids_enc[i].end()) {
- f = 0.0f;
- }
- }
- data[h*(n_output_enc*n_tokens) + j*n_output_enc + i] = f;
- }
- }
- for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
- for (int j = 0; j < n_output_enc; ++j) {
- data[h*(n_output_enc*n_tokens) + i*n_output_enc + j] = -INFINITY;
- }
- }
- }
- }
- }
- // Make sure enough space is available for outputs.
- // Returns max number of outputs for which space was reserved.
- static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) {
- const auto & cparams = lctx.cparams;
- const auto & hparams = lctx.model.hparams;
- const size_t n_outputs_max = std::max(n_outputs, (size_t) cparams.n_seq_max);
- const auto n_batch = cparams.n_batch;
- const auto n_vocab = hparams.n_vocab;
- const auto n_embd = hparams.n_embd;
- // TODO: use a per-batch flag for logits presence instead
- const bool has_logits = cparams.causal_attn;
- const bool has_embd = cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE);
- const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0;
- const size_t embd_size = has_embd ? n_embd*n_outputs_max : 0;
- if (lctx.output_ids.empty()) {
- // init, never resized afterwards
- lctx.output_ids.resize(n_batch);
- }
- const size_t prev_size = lctx.buf_output ? ggml_backend_buffer_get_size(lctx.buf_output) : 0;
- const size_t new_size = (logits_size + embd_size) * sizeof(float);
- // alloc only when more than the current capacity is required
- // TODO: also consider shrinking the buffer
- if (!lctx.buf_output || prev_size < new_size) {
- if (lctx.buf_output) {
- #ifndef NDEBUG
- // This doesn't happen often, but may be annoying in some cases (like the HellaSwag benchmark)
- LLAMA_LOG_INFO("%s: reallocating output buffer from size %.02f MiB to %.02f MiB\n", __func__, prev_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
- #endif
- ggml_backend_buffer_free(lctx.buf_output);
- lctx.buf_output = nullptr;
- lctx.logits = nullptr;
- lctx.embd = nullptr;
- }
- lctx.buf_output = ggml_backend_buft_alloc_buffer(llama_default_buffer_type_cpu(true), new_size);
- if (lctx.buf_output == nullptr) {
- LLAMA_LOG_ERROR("%s: failed to allocate output buffer of size %.2f MiB\n", __func__, new_size / (1024.0 * 1024.0));
- return 0;
- }
- }
- float * output_base = (float *) ggml_backend_buffer_get_base(lctx.buf_output);
- lctx.logits = has_logits ? output_base : nullptr;
- lctx.embd = has_embd ? output_base + logits_size : nullptr;
- lctx.output_size = n_outputs_max;
- lctx.logits_size = logits_size;
- lctx.embd_size = embd_size;
- // set all ids as invalid (negative)
- std::fill(lctx.output_ids.begin(), lctx.output_ids.end(), -1);
- ggml_backend_buffer_clear(lctx.buf_output, 0);
- lctx.n_outputs = 0;
- return n_outputs_max;
- }
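- // Resulting layout of the single output allocation: [ logits: n_outputs_max * n_vocab | embd:
- // n_outputs_max * n_embd ] floats, with lctx.logits at the buffer base and lctx.embd at
- // base + logits_size; either region may be empty depending on has_logits / has_embd.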
- // make the outputs have the same order they had in the user-provided batch
- static void llama_output_reorder(struct llama_context * ctx) {
- std::vector<size_t> & out_ids = ctx->sbatch.out_ids;
- if (!out_ids.empty()) {
- uint32_t n_vocab = ctx->model.hparams.n_vocab;
- uint32_t n_embd = ctx->model.hparams.n_embd;
- int32_t n_outputs = ctx->n_outputs;
- GGML_ASSERT((size_t) n_outputs == out_ids.size());
- // TODO: is there something more efficient which also minimizes swaps?
- // selection sort, to minimize swaps (from https://en.wikipedia.org/wiki/Selection_sort)
- for (int32_t i = 0; i < n_outputs - 1; ++i) {
- int32_t j_min = i;
- for (int32_t j = i + 1; j < n_outputs; ++j) {
- if (out_ids[j] < out_ids[j_min]) {
- j_min = j;
- }
- }
- if (j_min == i) { continue; }
- std::swap(out_ids[i], out_ids[j_min]);
- if (ctx->logits_size > 0) {
- for (uint32_t k = 0; k < n_vocab; k++) {
- std::swap(ctx->logits[i*n_vocab + k], ctx->logits[j_min*n_vocab + k]);
- }
- }
- if (ctx->embd_size > 0) {
- for (uint32_t k = 0; k < n_embd; k++) {
- std::swap(ctx->embd[i*n_embd + k], ctx->embd[j_min*n_embd + k]);
- }
- }
- }
- std::fill(ctx->output_ids.begin(), ctx->output_ids.end(), -1);
- for (int32_t i = 0; i < n_outputs; ++i) {
- ctx->output_ids[out_ids[i]] = i;
- }
- out_ids.clear();
- }
- }
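- // Illustrative example: if out_ids = {2, 0, 1}, i.e. logits row 0 currently belongs to batch position 2,
- // the selection sort swaps rows until out_ids = {0, 1, 2}; afterwards output_ids[pos] yields the
- // logits/embd row for batch position pos.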
- static void llama_graph_compute(
- llama_context & lctx,
- ggml_cgraph * gf,
- int n_threads,
- ggml_threadpool * threadpool) {
- if (lctx.backend_cpu != nullptr) {
- ggml_backend_cpu_set_n_threads(lctx.backend_cpu, n_threads);
- ggml_backend_cpu_set_threadpool(lctx.backend_cpu, threadpool);
- ggml_backend_cpu_set_abort_callback(lctx.backend_cpu, lctx.abort_callback, lctx.abort_callback_data);
- }
- #ifdef GGML_USE_BLAS
- if (lctx.backend_blas != nullptr) {
- ggml_backend_blas_set_n_threads(lctx.backend_blas, n_threads);
- }
- #endif
- ggml_backend_sched_graph_compute_async(lctx.sched, gf);
- // fprintf(stderr, "splits: %d\n", ggml_backend_sched_get_n_splits(lctx.sched));
- }
- // decode a batch of tokens by evaluating the transformer
- //
- // - lctx: llama context
- // - batch: batch to evaluate
- //
- // return 0 on success
- // return positive int on warning
- // return negative int on error
- //
- static int llama_decode_internal(
- llama_context & lctx,
- llama_batch batch_all) { // TODO: rename back to batch
- lctx.is_encoding = false;
- const uint32_t n_tokens_all = batch_all.n_tokens;
- if (n_tokens_all == 0) {
- LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
- return -1;
- }
- const auto & model = lctx.model;
- const auto & hparams = model.hparams;
- const auto & cparams = lctx.cparams;
- GGML_ASSERT((!batch_all.token && batch_all.embd) || (batch_all.token && !batch_all.embd)); // NOLINT
- if (batch_all.token) {
- for (uint32_t i = 0; i < n_tokens_all; ++i) {
- if (batch_all.token[i] < 0 || (uint32_t)batch_all.token[i] >= model.vocab.n_vocab) {
- LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch_all.token[i]);
- return -1;
- }
- }
- }
- GGML_ASSERT(n_tokens_all <= cparams.n_batch);
- GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens");
- if (lctx.t_compute_start_us == 0) {
- lctx.t_compute_start_us = ggml_time_us();
- }
- lctx.n_queued_tokens += n_tokens_all;
- auto & kv_self = lctx.kv_self;
- const int64_t n_embd = hparams.n_embd;
- const int64_t n_vocab = hparams.n_vocab;
- uint32_t n_outputs = 0;
- uint32_t n_outputs_prev = 0;
- const auto n_ubatch = cparams.n_ubatch;
- // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens
- const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE;
- lctx.embd_seq.clear();
- // count outputs
- if (batch_all.logits && !embd_pooled) {
- for (uint32_t i = 0; i < n_tokens_all; ++i) {
- n_outputs += batch_all.logits[i] != 0;
- }
- } else if (lctx.logits_all || embd_pooled) {
- n_outputs = n_tokens_all;
- } else {
- // keep last output only
- n_outputs = 1;
- }
- lctx.sbatch.from_batch(batch_all, batch_all.n_embd,
- /* simple_split */ !kv_self.recurrent,
- /* logits_all */ n_outputs == n_tokens_all);
- // reserve output buffer
- if (llama_output_reserve(lctx, n_outputs) < n_outputs) {
- LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_outputs);
- return -2;
- }
- while (lctx.sbatch.n_tokens > 0) {
- llama_ubatch ubatch;
- if (kv_self.recurrent) {
- if (embd_pooled) {
- // Pooled embeddings cannot be split across ubatches (yet)
- ubatch = lctx.sbatch.split_seq(n_ubatch);
- } else {
- // recurrent model architectures are easier to implement
- // with equal-length sequences
- ubatch = lctx.sbatch.split_equal(n_ubatch);
- }
- } else {
- ubatch = lctx.sbatch.split_simple(n_ubatch);
- }
- const uint32_t n_tokens = ubatch.n_tokens;
- // count the outputs in this u_batch
- {
- int32_t n_outputs_new = 0;
- if (n_outputs == n_tokens_all) {
- n_outputs_new = n_tokens;
- } else {
- GGML_ASSERT(ubatch.output);
- for (uint32_t i = 0; i < n_tokens; i++) {
- n_outputs_new += (int32_t) (ubatch.output[i] != 0);
- }
- }
- // needs to happen before the graph is built
- lctx.n_outputs = n_outputs_new;
- }
- int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
- ggml_threadpool_t threadpool = n_tokens == 1 ? lctx.threadpool : lctx.threadpool_batch;
- GGML_ASSERT(n_threads > 0);
- // non-causal masks do not use the KV cache
- if (hparams.causal_attn) {
- llama_kv_cache_update(&lctx);
- // if we have enough unused cells before the current head ->
- // better to start searching from the beginning of the cache, hoping to fill it
- if (kv_self.head > kv_self.used + 2*n_tokens) {
- kv_self.head = 0;
- }
- if (!llama_kv_cache_find_slot(kv_self, ubatch)) {
- return 1;
- }
- if (!kv_self.recurrent) {
- // a heuristic, to avoid attending the full cache if it is not yet utilized
- // after enough generations, the benefit from this heuristic disappears
- // if we start defragmenting the cache, the benefit from this will be more important
- const uint32_t pad = llama_kv_cache_get_padding(cparams);
- kv_self.n = std::min(kv_self.size, std::max(pad, GGML_PAD(llama_kv_cache_cell_max(kv_self), pad)));
- //kv_self.n = llama_kv_cache_cell_max(kv_self);
- }
- }
- //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head);
- ggml_backend_sched_reset(lctx.sched);
- ggml_backend_sched_set_eval_callback(lctx.sched, lctx.cparams.cb_eval, lctx.cparams.cb_eval_user_data);
- ggml_cgraph * gf = llama_build_graph(lctx, ubatch, false);
- // the output is always the last tensor in the graph
- struct ggml_tensor * res = ggml_graph_node(gf, -1);
- struct ggml_tensor * embd = ggml_graph_node(gf, -2);
- if (lctx.n_outputs == 0) {
- // no output
- res = nullptr;
- embd = nullptr;
- }
- if (cparams.embeddings) {
- for (int i = ggml_graph_n_nodes(gf) - 1; i >= 0; --i) {
- embd = ggml_graph_node(gf, i);
- if (strcmp(ggml_graph_node(gf, i)->name, "result_embd_pooled") == 0) {
- break;
- }
- }
- } else {
- embd = nullptr; // do not extract embeddings when not needed
- GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor");
- }
- if (!cparams.causal_attn) {
- res = nullptr; // do not extract logits when not needed
- }
- // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
- ggml_backend_sched_alloc_graph(lctx.sched, gf);
- llama_set_inputs(lctx, ubatch);
- llama_graph_compute(lctx, gf, n_threads, threadpool);
- // update the kv ring buffer
- {
- kv_self.head += n_tokens;
- // Ensure kv cache head points to a valid index.
- if (kv_self.head >= kv_self.size) {
- kv_self.head = 0;
- }
- }
- // plot the computation graph in dot format (for debugging purposes)
- //if (n_past%100 == 0) {
- // ggml_graph_dump_dot(gf, NULL, "llama.dot");
- //}
- // extract logits
- if (res) {
- ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(lctx.sched, res);
- GGML_ASSERT(backend_res != nullptr);
- GGML_ASSERT(lctx.logits != nullptr);
- float * logits_out = lctx.logits + n_outputs_prev*n_vocab;
- const int32_t n_outputs_new = lctx.n_outputs;
- if (n_outputs_new) {
- GGML_ASSERT( n_outputs_prev + n_outputs_new <= n_outputs);
- GGML_ASSERT((n_outputs_prev + n_outputs_new)*n_vocab <= (int64_t) lctx.logits_size);
- ggml_backend_tensor_get_async(backend_res, res, logits_out, 0, n_outputs_new*n_vocab*sizeof(float));
- }
- }
- // extract embeddings
- if (embd) {
- ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(lctx.sched, embd);
- GGML_ASSERT(backend_embd != nullptr);
- switch (cparams.pooling_type) {
- case LLAMA_POOLING_TYPE_NONE:
- {
- // extract token embeddings
- GGML_ASSERT(lctx.embd != nullptr);
- float * embd_out = lctx.embd + n_outputs_prev*n_embd;
- const int32_t n_outputs_new = lctx.n_outputs;
- if (n_outputs_new) {
- GGML_ASSERT( n_outputs_prev + n_outputs_new <= n_outputs);
- GGML_ASSERT((n_outputs_prev + n_outputs_new)*n_embd <= (int64_t) lctx.embd_size);
- ggml_backend_tensor_get_async(backend_embd, embd, embd_out, 0, n_outputs_new*n_embd*sizeof(float));
- }
- } break;
- case LLAMA_POOLING_TYPE_MEAN:
- case LLAMA_POOLING_TYPE_CLS:
- case LLAMA_POOLING_TYPE_LAST:
- {
- // extract sequence embeddings (cleared before processing each batch)
- auto & embd_seq_out = lctx.embd_seq;
- for (uint32_t s = 0; s < ubatch.n_seqs; ++s) {
- const llama_seq_id seq_id = ubatch.seq_id[s][0];
- if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
- continue;
- }
- embd_seq_out[seq_id].resize(n_embd);
- ggml_backend_tensor_get_async(backend_embd, embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float));
- }
- } break;
- case LLAMA_POOLING_TYPE_RANK:
- {
- // extract the rerank score - a single float per sequence
- auto & embd_seq_out = lctx.embd_seq;
- for (uint32_t s = 0; s < ubatch.n_seqs; ++s) {
- const llama_seq_id seq_id = ubatch.seq_id[s][0];
- if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
- continue;
- }
- embd_seq_out[seq_id].resize(1);
- ggml_backend_tensor_get_async(backend_embd, embd, embd_seq_out[seq_id].data(), (seq_id)*sizeof(float), sizeof(float));
- }
- } break;
- case LLAMA_POOLING_TYPE_UNSPECIFIED:
- {
- GGML_ABORT("unknown pooling type");
- }
- }
- }
- n_outputs_prev += lctx.n_outputs;
- }
- // set output mappings
- {
- bool sorted_output = true;
- GGML_ASSERT(lctx.sbatch.out_ids.size() == n_outputs);
- for (size_t i = 0; i < n_outputs; ++i) {
- size_t out_id = lctx.sbatch.out_ids[i];
- lctx.output_ids[out_id] = i;
- if (out_id != i) {
- sorted_output = false;
- }
- }
- if (sorted_output) {
- lctx.sbatch.out_ids.clear();
- }
- }
- // set to total number of outputs in the batch, for use in llama_get_logits_ith
- lctx.n_outputs = n_outputs;
- // wait for the computation to finish (automatically done when obtaining the model output)
- //llama_synchronize(&lctx);
- // decide if we need to defrag the kv cache
- if (cparams.causal_attn && cparams.defrag_thold >= 0.0f) {
- const float fragmentation = kv_self.n >= 128 ? 1.0f - float(kv_self.used)/float(kv_self.n) : 0.0f;
- // queue defragmentation for next llama_kv_cache_update
- if (fragmentation > cparams.defrag_thold) {
- //LLAMA_LOG_INFO("fragmentation: %.2f\n", fragmentation);
- llama_kv_cache_defrag(kv_self);
- }
- }
- // Reset state for the next token before backend sync, to allow the CPU activities in the reset to
- // overlap with device computation.
- ggml_backend_sched_reset(lctx.sched);
- return 0;
- }
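- // Note on llama_decode_internal's return codes: 1 above means no KV cache slot could be found for the
- // batch, which callers typically handle by shrinking the batch or freeing cache space, whereas a
- // negative value is a hard error.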
- // encode a batch of tokens by evaluating the encoder part of the transformer
- //
- // - lctx: llama context
- // - batch: batch to evaluate
- //
- // return 0 on success
- // return positive int on warning
- // return negative int on error
- //
- static int llama_encode_internal(
- llama_context & lctx,
- llama_batch batch) {
- lctx.is_encoding = true;
- const uint32_t n_tokens = batch.n_tokens;
- if (n_tokens == 0) {
- LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
- return -1;
- }
- const auto & model = lctx.model;
- const auto & hparams = model.hparams;
- const auto & cparams = lctx.cparams;
- GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
- if (batch.token) {
- for (uint32_t i = 0; i < n_tokens; ++i) {
- if (batch.token[i] < 0 || (uint32_t)batch.token[i] >= model.vocab.n_vocab) {
- LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]);
- return -1;
- }
- }
- }
- // micro-batching is not possible for non-causal encoding, so we process the batch in a single shot
- GGML_ASSERT(cparams.n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens");
- if (lctx.t_compute_start_us == 0) {
- lctx.t_compute_start_us = ggml_time_us();
- }
- lctx.n_queued_tokens += n_tokens;
- const int64_t n_embd = hparams.n_embd;
- lctx.sbatch.from_batch(batch, batch.n_embd, /* simple_split */ true, /* logits_all */ true);
- const llama_ubatch ubatch = lctx.sbatch.split_simple(n_tokens);
- // reserve output buffer
- if (llama_output_reserve(lctx, n_tokens) < n_tokens) {
- LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_tokens);
- return -2;
- }
- for (uint32_t i = 0; i < n_tokens; ++i) {
- lctx.output_ids[i] = i;
- }
- lctx.inp_embd_enc = NULL;
- lctx.n_outputs = n_tokens;
- int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
- ggml_threadpool_t threadpool = n_tokens == 1 ? lctx.threadpool : lctx.threadpool_batch;
- GGML_ASSERT(n_threads > 0);
- ggml_backend_sched_reset(lctx.sched);
- ggml_backend_sched_set_eval_callback(lctx.sched, lctx.cparams.cb_eval, lctx.cparams.cb_eval_user_data);
- ggml_cgraph * gf = llama_build_graph(lctx, ubatch, false);
- // the output embeddings after the final encoder normalization
- struct ggml_tensor * embd = nullptr;
- // there are two cases here
- if (llama_model_has_decoder(&lctx.model)) {
- // first case is an encoder-decoder T5 model where embeddings are passed to decoder
- embd = ggml_graph_node(gf, -1);
- GGML_ASSERT(strcmp(embd->name, "result_norm") == 0 && "missing result_norm tensor");
- } else {
- // second case is an encoder-only T5 model
- if (cparams.embeddings) {
- // only output embeddings if required
- embd = ggml_graph_node(gf, -1);
- if (strcmp(embd->name, "result_embd_pooled") != 0) {
- embd = ggml_graph_node(gf, -2);
- }
- GGML_ASSERT(strcmp(embd->name, "result_embd_pooled") == 0 && "missing embeddings tensor");
- }
- }
- ggml_backend_sched_alloc_graph(lctx.sched, gf);
- llama_set_inputs(lctx, ubatch);
- llama_graph_compute(lctx, gf, n_threads, threadpool);
- // extract embeddings
- if (embd) {
- ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(lctx.sched, embd);
- GGML_ASSERT(backend_embd != nullptr);
- if (llama_model_has_decoder(&lctx.model)) {
- lctx.embd_enc.resize(n_tokens*n_embd);
- float * embd_out = lctx.embd_enc.data();
- ggml_backend_tensor_get_async(backend_embd, embd, embd_out, 0, n_tokens*n_embd*sizeof(float));
- GGML_ASSERT(!ubatch.equal_seqs); // TODO: handle equal splits
- // remember the sequence ids used during the encoding - needed for cross attention later
- lctx.seq_ids_enc.resize(n_tokens);
- for (uint32_t i = 0; i < n_tokens; i++) {
- for (int s = 0; s < ubatch.n_seq_id[i]; s++) {
- llama_seq_id seq_id = ubatch.seq_id[i][s];
- lctx.seq_ids_enc[i].insert(seq_id);
- }
- }
- } else {
- GGML_ASSERT(lctx.embd != nullptr);
- switch (cparams.pooling_type) {
- case LLAMA_POOLING_TYPE_NONE:
- {
- // extract token embeddings
- GGML_ASSERT(lctx.embd != nullptr);
- float * embd_out = lctx.embd;
- GGML_ASSERT(n_tokens*n_embd <= (int64_t) lctx.embd_size);
- ggml_backend_tensor_get_async(backend_embd, embd, embd_out, 0, n_tokens*n_embd*sizeof(float));
- } break;
- case LLAMA_POOLING_TYPE_MEAN:
- case LLAMA_POOLING_TYPE_CLS:
- case LLAMA_POOLING_TYPE_LAST:
- {
- // extract sequence embeddings
- auto & embd_seq_out = lctx.embd_seq;
- embd_seq_out.clear();
- GGML_ASSERT(!ubatch.equal_seqs); // TODO: handle equal splits
- for (uint32_t i = 0; i < n_tokens; i++) {
- const llama_seq_id seq_id = ubatch.seq_id[i][0];
- if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
- continue;
- }
- embd_seq_out[seq_id].resize(n_embd);
- ggml_backend_tensor_get_async(backend_embd, embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float));
- }
- } break;
- case LLAMA_POOLING_TYPE_RANK:
- {
- // TODO: this likely should be the same logic as in llama_decode_internal, but better to
- // wait for an encoder model that requires this pooling type in order to test it
- // https://github.com/ggerganov/llama.cpp/pull/9510
- GGML_ABORT("RANK pooling not implemented yet");
- }
- case LLAMA_POOLING_TYPE_UNSPECIFIED:
- {
- GGML_ABORT("unknown pooling type");
- }
- }
- }
- }
- // Reset state for the next token before backend sync, to allow the CPU activities in the reset to
- // overlap with device computation.
- ggml_backend_sched_reset(lctx.sched);
- return 0;
- }
- // find holes from the beginning of the KV cache and fill them by moving data from the end of the cache
- static void llama_kv_cache_defrag_internal(struct llama_context & lctx) {
- auto & kv_self = lctx.kv_self;
- const auto & hparams = lctx.model.hparams;
- const uint32_t n_layer = hparams.n_layer;
- const uint32_t n_kv = llama_kv_cache_cell_max(kv_self);
- const uint32_t n_used = kv_self.used;
- assert(n_used <= n_kv);
- //const int64_t t_start = ggml_time_us();
- // number of cells moved
- uint32_t n_moves = 0;
- // each move requires 6*n_layer tensors (see build_defrag)
- // - source view, destination view, copy operation
- // - x2 for keys and values
- //const uint32_t max_moves = llama_model_max_nodes(model)/(6*n_layer);
- // TODO: tmp fix https://github.com/ggerganov/llama.cpp/issues/6685#issuecomment-2057579516
- const uint32_t max_moves = (llama_model_max_nodes(lctx.model) - 2*n_layer)/(6*n_layer);
- // determine which KV cells to move where
- //
- // cell i moves to ids[i]
- //
- // if ids[i] == i || ids[i] == n_kv, then cell i is not moved
- //
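- // Illustrative example: with n_kv = 5, n_used = 3 and cells {used, empty, used, empty, used}, the scan
- // below produces ids = {0, 5, 2, 5, 1}: cells 0 and 2 stay in place, cell 4 fills the hole at index 1,
- // and entries still equal to n_kv (= 5) mark cells that do not move.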
- std::vector<uint32_t> ids(n_kv, n_kv);
- for (uint32_t i0 = 0; i0 < n_used; ++i0) {
- const auto & cell0 = kv_self.cells[i0];
- if (!cell0.is_empty()) {
- ids[i0] = i0;
- continue;
- }
- // found a hole - fill it with data from the end of the cache
- uint32_t nh = 1;
- // determine the size of the hole
- while (i0 + nh < n_used && kv_self.cells[i0 + nh].is_empty()) {
- nh++;
- }
- uint32_t nf = 0;
- uint32_t is = n_kv - 1;
- // starting from the end, find nh non-empty cells
- for (; is > i0; --is) {
- const auto & cell1 = kv_self.cells[is];
- if (cell1.is_empty() || ids[is] != n_kv) {
- continue;
- }
- // non-empty cell which is not yet moved
- nf++;
- if (nf == nh) {
- break;
- }
- }
- // this can only happen if `n_used` is not accurate, which would be a bug
- GGML_ASSERT(nf == nh && "KV defrag bug: nf != nh");
- nf = 0;
- uint32_t i1 = is;
- // are we moving a contiguous block of memory?
- bool cont = false;
- // should we stop searching for the next move?
- bool stop = false;
- // go back and move the nf cells to the hole
- for (; i1 < n_kv; ++i1) {
- auto & cell1 = kv_self.cells[i1];
- if (cell1.is_empty() || ids[i1] != n_kv) {
- if (n_moves == max_moves) {
- stop = true;
- break;
- }
- cont = false;
- continue;
- }
- // this cell goes to (i0 + nf)
- ids[i1] = i0 + nf;
- // move the cell meta data
- kv_self.cells[i0 + nf] = cell1;
- // clear the old cell and move the head there
- cell1 = llama_kv_cell();
- kv_self.head = n_used;
- if (!cont) {
- n_moves++;
- cont = true;
- }
- nf++;
- if (nf == nh) {
- break;
- }
- }
- if (stop || n_moves == max_moves) {
- break;
- }
- //LLAMA_LOG_INFO("(tmp log) KV defrag: move [%u, %u) to [%u, %u)\n", is, i1 + 1, i0, i0 + nh);
- i0 += nh - 1;
- }
- if (n_moves == 0) {
- return;
- }
- //LLAMA_LOG_INFO("(tmp log) KV defrag cell moves: %u\n", n_moves);
- //LLAMA_LOG_INFO("expected gf nodes: %u\n", 6*n_moves*n_layer);
- #if 0
- // CPU defrag
- //
- // TODO: optimizations are possible:
- // - multiple threads
- // - avoid copying to the host memory when already there
- //
- // likely not worth the effort, as we have ggml_graph based defrag
- //
- const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa();
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa();
- const uint32_t kv_size = kv_self.size;
- std::vector<uint8_t> buf_k;
- std::vector<uint8_t> buf_v;
- for (uint32_t il = 0; il < n_layer; ++il) {
- const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
- const size_t k_size = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa*kv_size);
- const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
- const size_t v_size = ggml_row_size (kv_self.v_l[il]->type, n_embd_v_gqa*kv_size);
- buf_k.resize(k_size);
- buf_v.resize(v_size);
- ggml_backend_tensor_get(kv_self.k_l[il], buf_k.data(), 0, buf_k.size());
- ggml_backend_tensor_get(kv_self.v_l[il], buf_v.data(), 0, buf_v.size());
- // batch move [i, i+nm) to [id, id+nm)
- // note: cells can move only to a lower index
- for (uint32_t i = 0; i < n_kv; ++i) {
- const uint32_t id = ids[i];
- if (i == id || id == n_kv) {
- continue;
- }
- uint32_t nm = 1;
- while (i + nm < n_kv && ids[i + nm] == id + nm) {
- nm++;
- }
- // move keys
- {
- const int64_t os = i*k_size_row;
- const int64_t od = id*k_size_row;
- memcpy(buf_k.data() + od, buf_k.data() + os, nm*k_size_row);
- }
- // move values (note: they are transposed)
- {
- const int64_t os = i;
- const int64_t od = id;
- for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
- memcpy(buf_v.data() + (od + j*kv_size)*v_size_el, buf_v.data() + (os + j*kv_size)*v_size_el, nm*v_size_el);
- }
- }
- i += nm - 1;
- }
- ggml_backend_tensor_set(kv_self.k_l[il], buf_k.data(), 0, buf_k.size());
- ggml_backend_tensor_set(kv_self.v_l[il], buf_v.data(), 0, buf_v.size());
- }
- #else
- // ggml_graph defrag
- ggml_backend_sched_reset(lctx.sched);
- ggml_cgraph * gf = llama_build_graph_defrag(lctx, ids);
- llama_graph_compute(lctx, gf, lctx.cparams.n_threads, lctx.threadpool);
- #endif
- //const int64_t t_end = ggml_time_us();
- //LLAMA_LOG_INFO("(tmp log) KV defrag time: %.3f ms\n", (t_end - t_start)/1000.0);
- }
- static void llama_kv_cache_update_internal(struct llama_context & lctx) {
- bool need_reserve = false;
- // apply K-shift if needed
- if (lctx.model.hparams.rope_type != LLAMA_ROPE_TYPE_NONE && lctx.kv_self.has_shift) {
- if (lctx.model.arch == LLM_ARCH_DEEPSEEK2) { // not supported due to MLA
- GGML_ABORT("Deepseek2 does not support K-shift");
- }
- {
- ggml_backend_sched_reset(lctx.sched);
- ggml_cgraph * gf = llama_build_graph_k_shift(lctx);
- ggml_backend_sched_alloc_graph(lctx.sched, gf);
- llama_set_k_shift(lctx);
- llama_graph_compute(lctx, gf, lctx.cparams.n_threads, lctx.threadpool);
- need_reserve = true;
- }
- {
- auto & kv_self = lctx.kv_self;
- kv_self.has_shift = false;
- for (uint32_t i = 0; i < kv_self.size; ++i) {
- kv_self.cells[i].delta = 0;
- }
- }
- }
- // defragment the KV cache if needed
- if (lctx.kv_self.do_defrag) {
- llama_kv_cache_defrag_internal(lctx);
- need_reserve = true;
- lctx.kv_self.do_defrag = false;
- }
- // reserve a worst case graph again
- if (need_reserve) {
- // TODO: extract to a function
- // build worst-case graph
- uint32_t n_seqs = 1; // TODO: worst-case number of sequences
- uint32_t n_tokens = std::min(lctx.cparams.n_ctx, lctx.cparams.n_ubatch);
- llama_token token = llama_token_bos(&lctx.model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
- llama_ubatch ubatch = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr};
- ggml_cgraph * gf = llama_build_graph(lctx, ubatch, true);
- // initialize scheduler with the worst-case graph
- ggml_backend_sched_reset(lctx.sched);
- if (!ggml_backend_sched_reserve(lctx.sched, gf)) {
- LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__);
- }
- }
- }
- //
- // quantization
- //
- struct quantize_state_internal {
- const llama_model & model;
- const llama_model_quantize_params * params;
- int n_attention_wv = 0;
- int n_ffn_down = 0;
- int n_ffn_gate = 0;
- int n_ffn_up = 0;
- int i_attention_wv = 0;
- int i_ffn_down = 0;
- int i_ffn_gate = 0;
- int i_ffn_up = 0;
- int n_k_quantized = 0;
- int n_fallback = 0;
- bool has_imatrix = false;
- // used to figure out if a model shares tok_embd with the output weight
- bool has_output = false;
- quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
- : model(model)
- , params(params)
- {}
- };
- static void llama_tensor_dequantize_internal(
- struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
- const size_t nelements, const int nthread
- ) {
- if (output.size() < nelements) {
- output.resize(nelements);
- }
- float * f32_output = (float *) output.data();
- ggml_type_traits_t qtype;
- if (ggml_is_quantized(tensor->type)) {
- qtype = ggml_internal_get_type_traits(tensor->type);
- if (qtype.to_float == NULL) {
- throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type)));
- }
- } else if (tensor->type != GGML_TYPE_F16 &&
- tensor->type != GGML_TYPE_BF16) {
- throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type)));
- }
- if (nthread < 2) {
- if (tensor->type == GGML_TYPE_F16) {
- ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements);
- } else if (tensor->type == GGML_TYPE_BF16) {
- ggml_bf16_to_fp32_row((ggml_bf16_t *)tensor->data, f32_output, nelements);
- } else if (ggml_is_quantized(tensor->type)) {
- qtype.to_float(tensor->data, f32_output, nelements);
- } else {
- GGML_ABORT("fatal error"); // unreachable
- }
- return;
- }
- size_t block_size;
- if (tensor->type == GGML_TYPE_F16 ||
- tensor->type == GGML_TYPE_BF16) {
- block_size = 1;
- } else {
- block_size = (size_t)ggml_blck_size(tensor->type);
- }
- size_t block_size_bytes = ggml_type_size(tensor->type);
- GGML_ASSERT(nelements % block_size == 0);
- size_t nblocks = nelements / block_size;
- size_t blocks_per_thread = nblocks / nthread;
- size_t spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
- size_t in_buff_offs = 0;
- size_t out_buff_offs = 0;
- for (int tnum = 0; tnum < nthread; tnum++) {
- size_t thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
- size_t thr_elems = thr_blocks * block_size; // number of elements for this thread
- size_t thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
- auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
- if (typ == GGML_TYPE_F16) {
- ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
- } else if (typ == GGML_TYPE_BF16) {
- ggml_bf16_to_fp32_row((ggml_bf16_t *)inbuf, outbuf, nels);
- } else {
- qtype.to_float(inbuf, outbuf, nels);
- }
- };
- workers.emplace_back(compute, tensor->type, (uint8_t *) tensor->data + in_buff_offs, f32_output + out_buff_offs, thr_elems);
- in_buff_offs += thr_block_bytes;
- out_buff_offs += thr_elems;
- }
- for (auto & w : workers) { w.join(); }
- workers.clear();
- }
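- // Illustrative example: with 10 blocks and nthread = 3, blocks_per_thread = 3 and spare_blocks = 1, so
- // the three workers dequantize 3, 3 and 4 blocks respectively before being joined.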
- static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
- const std::string name = ggml_get_name(tensor);
- // TODO: avoid hardcoded tensor names - use the TN_* constants
- const llm_arch arch = qs.model.arch;
- const auto tn = LLM_TN(arch);
- auto use_more_bits = [](int i_layer, int n_layers) -> bool {
- return i_layer < n_layers/8 || i_layer >= 7*n_layers/8 || (i_layer - n_layers/8)%3 == 2;
- };
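- // Illustrative example: for n_layers = 32, use_more_bits() selects layers 0-3, 28-31 and every third
- // layer from 6 through 27 (6, 9, ..., 27), i.e. the first and last eighth of the model plus a third of
- // the middle layers.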
- const int n_expert = std::max(1, (int)qs.model.hparams.n_expert);
- auto layer_info = [n_expert] (int i_layer, int n_layer, const char * name) {
- if (n_expert > 1) {
- // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but occasionally randomly
- // sprinkled in the model. Hence, simply dividing i_ffn_down by n_expert does not work
- // for getting the current layer as I initially thought, and we need to resort to parsing the
- // tensor name.
- if (sscanf(name, "blk.%d.", &i_layer) != 1) {
- throw std::runtime_error(format("Failed to determine layer for tensor %s", name));
- }
- if (i_layer < 0 || i_layer >= n_layer) {
- throw std::runtime_error(format("Bad layer %d for tensor %s. Must be in [0, %d)", i_layer, name, n_layer));
- }
- }
- return std::make_pair(i_layer, n_layer);
- };
- // for arches that share the same tensor between the token embeddings and the output, we quantize the token embeddings
- // with the quantization of the output tensor
- if (name == tn(LLM_TENSOR_OUTPUT, "weight") || (!qs.has_output && name == tn(LLM_TENSOR_TOKEN_EMBD, "weight"))) {
- if (qs.params->output_tensor_type < GGML_TYPE_COUNT) {
- new_type = qs.params->output_tensor_type;
- } else {
- int nx = tensor->ne[0];
- if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
- new_type = GGML_TYPE_Q8_0;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
- ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ||
- ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
- new_type = GGML_TYPE_Q5_K;
- }
- else if (new_type != GGML_TYPE_Q8_0) {
- new_type = GGML_TYPE_Q6_K;
- }
- }
- } else if (name == "token_embd.weight") {
- if (qs.params->token_embedding_type < GGML_TYPE_COUNT) {
- new_type = qs.params->token_embedding_type;
- } else {
- if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS ||
- ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
- new_type = GGML_TYPE_Q2_K;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
- new_type = GGML_TYPE_IQ3_S;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
- new_type = GGML_TYPE_IQ3_S;
- }
- else if (new_type == GGML_TYPE_Q4_0_4_4 || new_type == GGML_TYPE_Q4_0_4_8 ||
- new_type == GGML_TYPE_Q4_0_8_8) {
- new_type = GGML_TYPE_Q4_0;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_TQ1_0 || ftype == LLAMA_FTYPE_MOSTLY_TQ2_0) {
- new_type = GGML_TYPE_Q4_K;
- }
- }
- } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
- ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
- if (name.find("attn_v.weight") != std::string::npos) {
- if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
- else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
- ++qs.i_attention_wv;
- }
- else if (qs.model.hparams.n_expert == 8 && name.find("attn_k.weight") != std::string::npos) {
- new_type = GGML_TYPE_Q4_K;
- }
- else if (name.find("ffn_down") != std::string::npos) {
- if (qs.i_ffn_down < qs.n_ffn_down/8) {
- new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
- }
- ++qs.i_ffn_down;
- }
- else if (name.find("attn_output.weight") != std::string::npos) {
- if (qs.model.hparams.n_expert == 8) {
- new_type = GGML_TYPE_Q5_K;
- } else {
- if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) new_type = GGML_TYPE_IQ2_XXS;
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) new_type = GGML_TYPE_IQ3_S;
- }
- }
- } else if (name.find("attn_v.weight") != std::string::npos) {
- if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
- new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
- new_type = GGML_TYPE_Q4_K;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
- new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : !qs.has_imatrix ? GGML_TYPE_IQ3_S : GGML_TYPE_IQ3_XXS;
- }
- else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S) && qs.model.hparams.n_gqa() >= 4) {
- new_type = GGML_TYPE_Q4_K;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
- new_type = GGML_TYPE_Q4_K;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
- new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
- else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && qs.model.hparams.n_gqa() >= 4) {
- new_type = GGML_TYPE_Q5_K;
- }
- else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
- use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
- if (qs.model.type == MODEL_70B) {
- // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
- // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
- // nearly negligible increase in model size by quantizing this tensor with more bits:
- if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
- }
- if (qs.model.hparams.n_expert == 8) {
- // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
- // TODO: explore better strategies
- new_type = GGML_TYPE_Q8_0;
- }
- ++qs.i_attention_wv;
- } else if (name.find("attn_k.weight") != std::string::npos) {
- if (qs.model.hparams.n_expert == 8) {
- // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
- // TODO: explore better strategies
- new_type = GGML_TYPE_Q8_0;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
- new_type = GGML_TYPE_IQ3_XXS;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
- new_type = GGML_TYPE_IQ2_S;
- }
- } else if (name.find("attn_q.weight") != std::string::npos) {
- if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
- new_type = GGML_TYPE_IQ3_XXS;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
- new_type = GGML_TYPE_IQ2_S;
- }
- } else if (name.find("ffn_down") != std::string::npos) {
- auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
- int i_layer = info.first, n_layer = info.second;
- if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S) {
- if (i_layer < n_layer/8) new_type = GGML_TYPE_Q4_K;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS && !qs.has_imatrix) {
- new_type = i_layer < n_layer/8 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
- new_type = i_layer < n_layer/16 ? GGML_TYPE_Q5_K
- : arch != LLM_ARCH_FALCON || use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q4_K
- : GGML_TYPE_Q3_K;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M && (i_layer < n_layer/8 ||
- (qs.model.hparams.n_expert == 8 && use_more_bits(i_layer, n_layer)))) {
- new_type = GGML_TYPE_Q4_K;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
- new_type = arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
- if (arch == LLM_ARCH_FALCON) {
- new_type = i_layer < n_layer/16 ? GGML_TYPE_Q6_K :
- use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
- } else {
- if (use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
- }
- }
- else if (i_layer < n_layer/8 && (ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && !qs.has_imatrix) {
- new_type = GGML_TYPE_Q5_K;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && arch != LLM_ARCH_FALCON && i_layer < n_layer/8) {
- new_type = GGML_TYPE_Q5_K;
- }
- else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_0 || ftype == LLAMA_FTYPE_MOSTLY_Q5_0)
- && qs.has_imatrix && i_layer < n_layer/8) {
- // Guard against craziness in the first few ffn_down layers that can happen even with imatrix for Q4_0/Q5_0.
- // We only do it when an imatrix is provided because a) we want to make sure that one can always get the
- // same quantization as before imatrix stuff, and b) Q4_1/Q5_1 do go crazy on ffn_down without an imatrix.
- new_type = ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ? GGML_TYPE_Q4_1 : GGML_TYPE_Q5_1;
- }
- ++qs.i_ffn_down;
- } else if (name.find("attn_output.weight") != std::string::npos) {
- if (arch != LLM_ARCH_FALCON) {
- if (qs.model.hparams.n_expert == 8) {
- if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
- ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL ||
- ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S ||
- ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) {
- new_type = GGML_TYPE_Q5_K;
- }
- } else {
- if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K ) new_type = GGML_TYPE_Q3_K;
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) new_type = GGML_TYPE_IQ3_S;
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ) new_type = GGML_TYPE_Q4_K;
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L ) new_type = GGML_TYPE_Q5_K;
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M ) new_type = GGML_TYPE_Q4_K;
- }
- } else {
- if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
- }
- }
- else if (name.find("attn_qkv.weight") != std::string::npos) {
- if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
- new_type = GGML_TYPE_Q4_K;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
- }
- else if (name.find("ffn_gate") != std::string::npos) {
- auto info = layer_info(qs.i_ffn_gate, qs.n_ffn_gate, name.c_str());
- int i_layer = info.first, n_layer = info.second;
- if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
- new_type = GGML_TYPE_IQ3_XXS;
- }
- ++qs.i_ffn_gate;
- }
- else if (name.find("ffn_up") != std::string::npos) {
- auto info = layer_info(qs.i_ffn_up, qs.n_ffn_up, name.c_str());
- int i_layer = info.first, n_layer = info.second;
- if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
- new_type = GGML_TYPE_IQ3_XXS;
- }
- ++qs.i_ffn_up;
- }
- // if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
- //}
- // IK: let's remove this, else Q2_K is almost the same as Q3_K_S
- //else if (name.find("ffn_gate") != std::string::npos || name.find("ffn_up") != std::string::npos) {
- // if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
- //}
- // This can be used to reduce the size of the Q5_K_S model.
- // The associated PPL increase is fully in line with the size reduction
- //else {
- // if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q4_K;
- //}
- bool convert_incompatible_tensor = false;
- if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K ||
- new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K || new_type == GGML_TYPE_IQ4_XS ||
- new_type == GGML_TYPE_IQ2_XS || new_type == GGML_TYPE_IQ2_XXS || new_type == GGML_TYPE_IQ2_S ||
- new_type == GGML_TYPE_IQ3_XXS || new_type == GGML_TYPE_IQ1_S || new_type == GGML_TYPE_IQ3_S ||
- new_type == GGML_TYPE_IQ1_M) {
- int nx = tensor->ne[0];
- int ny = tensor->ne[1];
- if (nx % QK_K != 0) {
- LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
- convert_incompatible_tensor = true;
- } else {
- ++qs.n_k_quantized;
- }
- }
- if (convert_incompatible_tensor) {
- switch (new_type) {
- case GGML_TYPE_TQ1_0:
- case GGML_TYPE_TQ2_0: new_type = GGML_TYPE_Q4_0; break; // TODO: use a symmetric type instead
- case GGML_TYPE_IQ2_XXS:
- case GGML_TYPE_IQ2_XS:
- case GGML_TYPE_IQ2_S:
- case GGML_TYPE_IQ3_XXS:
- case GGML_TYPE_IQ3_S:
- case GGML_TYPE_IQ1_S:
- case GGML_TYPE_IQ1_M:
- case GGML_TYPE_Q2_K:
- case GGML_TYPE_Q3_K:
- case GGML_TYPE_IQ4_XS: new_type = GGML_TYPE_IQ4_NL; break;
- case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
- case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break;
- case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
- default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
- }
- if (tensor->ne[0] % ggml_blck_size(new_type) != 0) {
- new_type = GGML_TYPE_F16;
- }
- LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
- ++qs.n_fallback;
- }
- return new_type;
- }
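- // Quantize one tensor's f32 data into new_type. With fewer than two threads the whole tensor
- // is quantized in a single ggml_quantize_chunk() call; otherwise rows are handed out to the
- // workers in chunks via a mutex-protected counter, each chunk is validated with
- // ggml_validate_row_data(), and the per-worker byte counts are summed into the returned size.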
- static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
- if (nthread < 2) {
- // single-thread
- size_t new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, imatrix);
- if (!ggml_validate_row_data(new_type, new_data, new_size)) {
- throw std::runtime_error("quantized data validation failed");
- }
- return new_size;
- }
- std::mutex mutex;
- int64_t counter = 0;
- size_t new_size = 0;
- bool valid = true;
- auto compute = [&mutex, &counter, &new_size, &valid, new_type, f32_data, new_data, chunk_size,
- nrows, n_per_row, imatrix]() {
- const int64_t nrows_per_chunk = chunk_size / n_per_row;
- size_t local_size = 0;
- while (true) {
- std::unique_lock<std::mutex> lock(mutex);
- int64_t first_row = counter; counter += nrows_per_chunk;
- if (first_row >= nrows) {
- if (local_size > 0) {
- new_size += local_size;
- }
- break;
- }
- lock.unlock();
- const int64_t this_nrow = std::min(nrows - first_row, nrows_per_chunk);
- size_t this_size = ggml_quantize_chunk(new_type, f32_data, new_data, first_row * n_per_row, this_nrow, n_per_row, imatrix);
- local_size += this_size;
- // validate the quantized data
- const size_t row_size = ggml_row_size(new_type, n_per_row);
- void * this_data = (char *) new_data + first_row * row_size;
- if (!ggml_validate_row_data(new_type, this_data, this_size)) {
- std::unique_lock<std::mutex> lock(mutex);
- valid = false;
- break;
- }
- }
- };
- for (int it = 0; it < nthread - 1; ++it) {
- workers.emplace_back(compute);
- }
- compute();
- for (auto & w : workers) { w.join(); }
- workers.clear();
- if (!valid) {
- throw std::runtime_error("quantized data validation failed");
- }
- return new_size;
- }
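- // Top-level quantization driver: map the requested ftype to a default ggml type, load the
- // input model's metadata, copy and adjust the GGUF KV pairs, then walk every tensor and either
- // copy it verbatim or dequantize/requantize it to the chosen type before writing the output
- // file(s), keeping one output file per input split when params->keep_split is set.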
- static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
- ggml_type default_type;
- llama_ftype ftype = params->ftype;
- switch (params->ftype) {
- case LLAMA_FTYPE_MOSTLY_Q4_0: default_type = GGML_TYPE_Q4_0; break;
- case LLAMA_FTYPE_MOSTLY_Q4_1: default_type = GGML_TYPE_Q4_1; break;
- case LLAMA_FTYPE_MOSTLY_Q5_0: default_type = GGML_TYPE_Q5_0; break;
- case LLAMA_FTYPE_MOSTLY_Q5_1: default_type = GGML_TYPE_Q5_1; break;
- case LLAMA_FTYPE_MOSTLY_Q8_0: default_type = GGML_TYPE_Q8_0; break;
- case LLAMA_FTYPE_MOSTLY_F16: default_type = GGML_TYPE_F16; break;
- case LLAMA_FTYPE_MOSTLY_BF16: default_type = GGML_TYPE_BF16; break;
- case LLAMA_FTYPE_ALL_F32: default_type = GGML_TYPE_F32; break;
- // K-quants
- case LLAMA_FTYPE_MOSTLY_Q2_K_S:
- case LLAMA_FTYPE_MOSTLY_Q2_K: default_type = GGML_TYPE_Q2_K; break;
- case LLAMA_FTYPE_MOSTLY_IQ3_XS: default_type = GGML_TYPE_IQ3_S; break;
- case LLAMA_FTYPE_MOSTLY_Q3_K_S:
- case LLAMA_FTYPE_MOSTLY_Q3_K_M:
- case LLAMA_FTYPE_MOSTLY_Q3_K_L: default_type = GGML_TYPE_Q3_K; break;
- case LLAMA_FTYPE_MOSTLY_Q4_K_S:
- case LLAMA_FTYPE_MOSTLY_Q4_K_M: default_type = GGML_TYPE_Q4_K; break;
- case LLAMA_FTYPE_MOSTLY_Q5_K_S:
- case LLAMA_FTYPE_MOSTLY_Q5_K_M: default_type = GGML_TYPE_Q5_K; break;
- case LLAMA_FTYPE_MOSTLY_Q6_K: default_type = GGML_TYPE_Q6_K; break;
- case LLAMA_FTYPE_MOSTLY_TQ1_0: default_type = GGML_TYPE_TQ1_0; break;
- case LLAMA_FTYPE_MOSTLY_TQ2_0: default_type = GGML_TYPE_TQ2_0; break;
- case LLAMA_FTYPE_MOSTLY_IQ2_XXS: default_type = GGML_TYPE_IQ2_XXS; break;
- case LLAMA_FTYPE_MOSTLY_IQ2_XS: default_type = GGML_TYPE_IQ2_XS; break;
- case LLAMA_FTYPE_MOSTLY_IQ2_S: default_type = GGML_TYPE_IQ2_XS; break;
- case LLAMA_FTYPE_MOSTLY_IQ2_M: default_type = GGML_TYPE_IQ2_S; break;
- case LLAMA_FTYPE_MOSTLY_IQ3_XXS: default_type = GGML_TYPE_IQ3_XXS; break;
- case LLAMA_FTYPE_MOSTLY_IQ1_S: default_type = GGML_TYPE_IQ1_S; break;
- case LLAMA_FTYPE_MOSTLY_IQ1_M: default_type = GGML_TYPE_IQ1_M; break;
- case LLAMA_FTYPE_MOSTLY_IQ4_NL: default_type = GGML_TYPE_IQ4_NL; break;
- case LLAMA_FTYPE_MOSTLY_IQ4_XS: default_type = GGML_TYPE_IQ4_XS; break;
- case LLAMA_FTYPE_MOSTLY_IQ3_S: default_type = GGML_TYPE_IQ3_S; break;
- case LLAMA_FTYPE_MOSTLY_IQ3_M: default_type = GGML_TYPE_IQ3_S; break;
- case LLAMA_FTYPE_MOSTLY_Q4_0_4_4: default_type = GGML_TYPE_Q4_0_4_4; break;
- case LLAMA_FTYPE_MOSTLY_Q4_0_4_8: default_type = GGML_TYPE_Q4_0_4_8; break;
- case LLAMA_FTYPE_MOSTLY_Q4_0_8_8: default_type = GGML_TYPE_Q4_0_8_8; break;
- default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
- }
- int nthread = params->nthread;
- if (nthread <= 0) {
- nthread = std::thread::hardware_concurrency();
- }
- // mmap consistently increases speed on Linux, and also increases speed on Windows with
- // hot cache. It may cause a slowdown on macOS, possibly related to free memory.
- #if defined(__linux__) || defined(_WIN32)
- constexpr bool use_mmap = true;
- #else
- constexpr bool use_mmap = false;
- #endif
- llama_model_kv_override * kv_overrides = nullptr;
- if (params->kv_overrides) {
- auto v = (std::vector<llama_model_kv_override>*)params->kv_overrides;
- kv_overrides = v->data();
- }
- llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, kv_overrides);
- ml.init_mappings(false); // no prefetching
- llama_model model;
- llm_load_arch(ml, model);
- llm_load_hparams(ml, model);
- struct quantize_state_internal qs(model, params);
- if (params->only_copy) {
- ftype = model.ftype;
- }
- const std::unordered_map<std::string, std::vector<float>> * imatrix_data = nullptr;
- if (params->imatrix) {
- imatrix_data = static_cast<const std::unordered_map<std::string, std::vector<float>>*>(params->imatrix);
- if (imatrix_data) {
- LLAMA_LOG_INFO("================================ Have weights data with %d entries\n",int(imatrix_data->size()));
- qs.has_imatrix = true;
- // check imatrix for nans or infs
- for (const auto & kv : *imatrix_data) {
- for (float f : kv.second) {
- if (!std::isfinite(f)) {
- throw std::runtime_error(format("imatrix contains non-finite value %f\n", f));
- }
- }
- }
- }
- }
- const size_t align = GGUF_DEFAULT_ALIGNMENT;
- struct gguf_context * ctx_out = gguf_init_empty();
- // copy the KV pairs from the input file
- gguf_set_kv (ctx_out, ml.meta);
- gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION); // TODO: use LLM_KV
- gguf_set_val_u32(ctx_out, "general.file_type", ftype); // TODO: use LLM_KV
- // Remove split metadata
- gguf_remove_key(ctx_out, ml.llm_kv(LLM_KV_SPLIT_NO).c_str());
- gguf_remove_key(ctx_out, ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str());
- gguf_remove_key(ctx_out, ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str());
- if (params->kv_overrides) {
- const std::vector<llama_model_kv_override> & overrides = *(const std::vector<llama_model_kv_override> *)params->kv_overrides;
- for (auto & o : overrides) {
- if (o.key[0] == 0) break;
- if (o.tag == LLAMA_KV_OVERRIDE_TYPE_FLOAT) {
- gguf_set_val_f32(ctx_out, o.key, o.val_f64);
- } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
- gguf_set_val_i32(ctx_out, o.key, o.val_i64);
- } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
- gguf_set_val_bool(ctx_out, o.key, o.val_bool);
- } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_STR) {
- gguf_set_val_str(ctx_out, o.key, o.val_str);
- } else {
- LLAMA_LOG_WARN("%s: unknown KV override type for key %s\n", __func__, o.key);
- }
- }
- }
- for (int i = 0; i < ml.n_tensors; ++i) {
- const struct ggml_tensor * meta = ml.get_tensor_meta(i);
- const std::string name = ggml_get_name(meta);
- // TODO: avoid hardcoded tensor names - use the TN_* constants
- if (name.find("attn_v.weight") != std::string::npos ||
- name.find("attn_qkv.weight") != std::string::npos ||
- name.find("attn_kv_b.weight")!= std::string::npos) {
- ++qs.n_attention_wv;
- } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
- qs.has_output = true;
- }
- }
- qs.n_ffn_down = qs.n_ffn_gate = qs.n_ffn_up = (int)model.hparams.n_layer;
- // sanity checks
- {
- const auto & n_head_kv_iter = model.hparams.n_head_kv_arr.begin();
- // attention layers have a non-zero number of kv heads
- int32_t n_attn_layer = model.hparams.n_layer - std::count(n_head_kv_iter, n_head_kv_iter + model.hparams.n_layer, 0);
- if (llama_model_has_encoder(&model)) {
- n_attn_layer *= 3;
- }
- if (qs.n_attention_wv != n_attn_layer) {
- LLAMA_LOG_WARN("%s: n_attention_wv is unexpected, expected: %d, found: %d\n", __func__, n_attn_layer, qs.n_attention_wv);
- }
- }
- size_t total_size_org = 0;
- size_t total_size_new = 0;
- std::vector<std::thread> workers;
- workers.reserve(nthread);
- int idx = 0;
- std::vector<no_init<uint8_t>> read_data;
- std::vector<no_init<uint8_t>> work;
- std::vector<no_init<float>> f32_conv_buf;
- uint16_t n_split = 1;
- // Assume split index is continuous
- if (params->keep_split) {
- for (int i = 0; i < ml.n_tensors; ++i) {
- n_split = std::max(uint16_t(ml.get_weight(i)->idx+1), n_split);
- }
- }
- std::vector<gguf_context*> ctx_outs(n_split, NULL);
- ctx_outs[0] = ctx_out;
- // populate the original tensors so we get an initial meta data
- for (int i = 0; i < ml.n_tensors; ++i) {
- auto weight = ml.get_weight(i);
- uint16_t i_split = params->keep_split ? weight->idx : 0;
- struct ggml_tensor * tensor = weight->tensor;
- if (ctx_outs[i_split] == NULL) {
- ctx_outs[i_split] = gguf_init_empty();
- }
- gguf_add_tensor(ctx_outs[i_split], tensor);
- }
- // Set split info if needed
- if (n_split > 1) {
- for (size_t i = 0; i < ctx_outs.size(); ++i) {
- gguf_set_val_u16(ctx_outs[i], ml.llm_kv(LLM_KV_SPLIT_NO).c_str(), i);
- gguf_set_val_u16(ctx_outs[i], ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str(), n_split);
- gguf_set_val_i32(ctx_outs[i], ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), ml.n_tensors);
- }
- }
- int cur_split = -1;
- std::ofstream fout;
- auto close_ofstream = [&]() {
- // Write metadata and close file handler
- if (fout.is_open()) {
- fout.seekp(0);
- std::vector<uint8_t> data(gguf_get_meta_size(ctx_outs[cur_split]));
- gguf_get_meta_data(ctx_outs[cur_split], data.data());
- fout.write((const char *) data.data(), data.size());
- fout.close();
- }
- };
- auto new_ofstream = [&](int index) {
- cur_split = index;
- GGML_ASSERT(ctx_outs[cur_split] && "Found uninitialized gguf_context");
- std::string fname = fname_out;
- if (params->keep_split) {
- char split_path[PATH_MAX] = {0};
- llama_split_path(split_path, sizeof(split_path), fname_out.c_str(), cur_split, n_split);
- fname = std::string(split_path);
- }
- fout = std::ofstream(fname, std::ios::binary);
- fout.exceptions(std::ofstream::failbit); // fail fast on write errors
- const size_t meta_size = gguf_get_meta_size(ctx_outs[cur_split]);
- // placeholder for the meta data
- ::zeros(fout, meta_size);
- };
- const auto tn = LLM_TN(model.arch);
- new_ofstream(0);
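- // main per-tensor loop: load each tensor, decide whether and how to quantize it,
- // then write the (possibly converted) data to the current output split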
- for (int i = 0; i < ml.n_tensors; ++i) {
- auto weight = ml.get_weight(i);
- struct ggml_tensor * tensor = weight->tensor;
- if (weight->idx != cur_split && params->keep_split) {
- close_ofstream();
- new_ofstream(weight->idx);
- }
- const std::string name = ggml_get_name(tensor);
- if (!ml.use_mmap) {
- if (read_data.size() < ggml_nbytes(tensor)) {
- read_data.resize(ggml_nbytes(tensor));
- }
- tensor->data = read_data.data();
- }
- ml.load_data_for(tensor);
- LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ",
- ++idx, ml.n_tensors,
- ggml_get_name(tensor),
- llama_format_tensor_shape(tensor).c_str(),
- ggml_type_name(tensor->type));
- // This used to be a regex, but <regex> has an extreme cost to compile times.
- bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'?
- // quantize only 2D and 3D tensors (experts)
- quantize &= (ggml_n_dims(tensor) >= 2);
- // do not quantize norm tensors
- quantize &= name.find("_norm.weight") == std::string::npos;
- quantize &= params->quantize_output_tensor || name != "output.weight";
- quantize &= !params->only_copy;
- // do not quantize expert gating tensors
- // NOTE: can't use LLM_TN here because the layer number is not known
- quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
- // do not quantize positional embeddings and token types (BERT)
- quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD, "weight");
- quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_TOKEN_TYPES, "weight");
- // do not quantize Mamba's small yet 2D weights
- // NOTE: can't use LLM_TN here because the layer number is not known
- quantize &= name.find("ssm_conv1d.weight") == std::string::npos;
- // do not quantize RWKV's time_mix_first tensors
- quantize &= name.find("time_mix_first.weight") == std::string::npos;
- quantize &= name.find("time_mix_w1.weight") == std::string::npos;
- quantize &= name.find("time_mix_w2.weight") == std::string::npos;
- quantize &= name.find("time_mix_decay_w1.weight") == std::string::npos;
- quantize &= name.find("time_mix_decay_w2.weight") == std::string::npos;
- // do not quantize relative position bias (T5)
- quantize &= name.find("attn_rel_b.weight") == std::string::npos;
- enum ggml_type new_type;
- void * new_data;
- size_t new_size;
- if (quantize) {
- new_type = default_type;
- // get more optimal quantization type based on the tensor shape, layer, etc.
- if (!params->pure && ggml_is_quantized(default_type)) {
- new_type = llama_tensor_get_type(qs, new_type, tensor, ftype);
- }
- if (params->token_embedding_type < GGML_TYPE_COUNT && strcmp(tensor->name, "token_embd.weight") == 0) {
- new_type = params->token_embedding_type;
- }
- if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
- new_type = params->output_tensor_type;
- }
- // If we've decided to quantize to the same type the tensor is already
- // in then there's nothing to do.
- quantize = tensor->type != new_type;
- }
- if (!quantize) {
- new_type = tensor->type;
- new_data = tensor->data;
- new_size = ggml_nbytes(tensor);
- LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
- } else {
- const int64_t nelements = ggml_nelements(tensor);
- const float * imatrix = nullptr;
- if (imatrix_data) {
- auto it = imatrix_data->find(tensor->name);
- if (it == imatrix_data->end()) {
- LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
- } else {
- if (it->second.size() == (size_t)tensor->ne[0]*tensor->ne[2]) {
- imatrix = it->second.data();
- } else {
- LLAMA_LOG_INFO("\n====== %s: imatrix size %d is different from tensor size %d for %s\n", __func__,
- int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name);
- // this can happen when quantizing an old mixtral model with split tensors with a new incompatible imatrix
- // this is a significant error and it may be a good idea to abort the process if this happens,
- // since many people will miss the error and not realize that most of the model is being quantized without an imatrix
- // tok_embd should be ignored in this case, since it always causes this warning
- if (name != tn(LLM_TENSOR_TOKEN_EMBD, "weight")) {
- throw std::runtime_error(format("imatrix size %d is different from tensor size %d for %s",
- int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name));
- }
- }
- }
- }
- if ((new_type == GGML_TYPE_IQ2_XXS ||
- new_type == GGML_TYPE_IQ2_XS ||
- new_type == GGML_TYPE_IQ2_S ||
- new_type == GGML_TYPE_IQ1_S ||
- (new_type == GGML_TYPE_IQ1_M && strcmp(tensor->name, "token_embd.weight") && strcmp(tensor->name, "output.weight")) ||
- (new_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && strcmp(tensor->name, "token_embd.weight") != 0)) && !imatrix) {
- LLAMA_LOG_ERROR("\n\n============================================================\n");
- LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name);
- LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n");
- LLAMA_LOG_ERROR("============================================================\n\n");
- throw std::runtime_error(format("Missing importance matrix for tensor %s in a very low-bit quantization", tensor->name));
- }
- float * f32_data;
- if (tensor->type == GGML_TYPE_F32) {
- f32_data = (float *) tensor->data;
- } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
- throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
- } else {
- llama_tensor_dequantize_internal(tensor, f32_conv_buf, workers, nelements, nthread);
- f32_data = (float *) f32_conv_buf.data();
- }
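- // the repacked Q4_0 variants require the row count to be a multiple of their interleave
- // factor; otherwise fall back to plain Q4_0. The chunk size below is scaled by that factor.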
- int chunk_size_multiplier = 1;
- if (new_type == GGML_TYPE_Q4_0_4_4 || new_type == GGML_TYPE_Q4_0_4_8 || new_type == GGML_TYPE_Q4_0_8_8) {
- if ((new_type == GGML_TYPE_Q4_0_8_8) && (tensor->ne[1] % 8 != 0)) new_type = GGML_TYPE_Q4_0;
- else if (tensor->ne[1] % 4 != 0) new_type = GGML_TYPE_Q4_0;
- if (new_type == GGML_TYPE_Q4_0_8_8) chunk_size_multiplier = 8;
- else if (new_type == GGML_TYPE_Q4_0_4_4 || new_type == GGML_TYPE_Q4_0_4_8) chunk_size_multiplier = 4;
- }
- LLAMA_LOG_INFO("converting to %s .. ", ggml_type_name(new_type));
- fflush(stdout);
- if (work.size() < (size_t)nelements * 4) {
- work.resize(nelements * 4); // upper bound on size
- }
- new_data = work.data();
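- // use chunks of at least min_chunk_size elements made of whole rows, and cap the number
- // of worker threads by the number of chunks in this matrix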
- const int64_t n_per_row = tensor->ne[0];
- const int64_t nrows = tensor->ne[1];
- static const int64_t min_chunk_size = 32 * 512;
- const int64_t chunk_size = (n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row)) *
- chunk_size_multiplier;
- const int64_t nelements_matrix = tensor->ne[0] * tensor->ne[1];
- const int64_t nchunk = (nelements_matrix + chunk_size - 1)/chunk_size;
- const int64_t nthread_use = nthread > 1 ? std::max((int64_t)1, std::min((int64_t)nthread, nchunk)) : 1;
- // quantize each expert separately since they have different importance matrices
- new_size = 0;
- for (int64_t i03 = 0; i03 < tensor->ne[2]; ++i03) {
- const float * f32_data_03 = f32_data + i03 * nelements_matrix;
- void * new_data_03 = (char *)new_data + ggml_row_size(new_type, n_per_row) * i03 * nrows;
- const float * imatrix_03 = imatrix ? imatrix + i03 * n_per_row : nullptr;
- new_size += llama_tensor_quantize_internal(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use);
- }
- LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
- }
- total_size_org += ggml_nbytes(tensor);
- total_size_new += new_size;
- // update the gguf meta data as we go
- gguf_set_tensor_type(ctx_outs[cur_split], name.c_str(), new_type);
- gguf_set_tensor_data(ctx_outs[cur_split], name.c_str(), new_data, new_size);
- // write tensor data + padding
- fout.write((const char *) new_data, new_size);
- zeros(fout, GGML_PAD(new_size, align) - new_size);
- }
- close_ofstream();
- for (auto & c:ctx_outs) {
- gguf_free(c);
- }
- LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
- LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
- if (qs.n_fallback > 0) {
- LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) required fallback quantization\n",
- __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback);
- }
- }
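- // Load a LoRA adapter from a GGUF file: validate the adapter metadata (general.type,
- // architecture, adapter.type, alpha), pair the *.lora_a / *.lora_b tensors, allocate matching
- // tensors in per-buffer-type contexts, and copy the weight data from the file into them.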
- static void llama_lora_adapter_init_internal(struct llama_model * model, const char * path_lora, struct llama_lora_adapter & adapter) {
- LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
- ggml_context * ctx = nullptr;
- struct gguf_init_params meta_gguf_params = {
- /* .no_alloc = */ true,
- /* .ctx = */ &ctx,
- };
- struct gguf_context * ctx_gguf = gguf_init_from_file(path_lora, meta_gguf_params);
- if (!ctx_gguf) {
- throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
- }
- // check metadata
- {
- auto get_kv_str = [&](const std::string & key) -> std::string {
- int id = gguf_find_key(ctx_gguf, key.c_str());
- return id < 0 ? "" : std::string(gguf_get_val_str(ctx_gguf, id));
- };
- auto get_kv_f32 = [&](const std::string & key) -> float {
- int id = gguf_find_key(ctx_gguf, key.c_str());
- return id < 0 ? 0.0f : gguf_get_val_f32(ctx_gguf, id);
- };
- LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
- auto general_type = get_kv_str(llm_kv(LLM_KV_GENERAL_TYPE));
- if (general_type != "adapter") {
- gguf_free(ctx_gguf);
- throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type);
- }
- auto general_arch_str = get_kv_str(llm_kv(LLM_KV_GENERAL_ARCHITECTURE));
- auto general_arch = llm_arch_from_string(general_arch_str);
- if (general_arch != model->arch) {
- gguf_free(ctx_gguf);
- throw std::runtime_error("model arch and LoRA arch mismatch");
- }
- auto adapter_type = get_kv_str(llm_kv(LLM_KV_ADAPTER_TYPE));
- if (adapter_type != "lora") {
- gguf_free(ctx_gguf);
- throw std::runtime_error("expect adapter.type to be 'lora', but got: " + adapter_type);
- }
- adapter.alpha = get_kv_f32(llm_kv(LLM_KV_ADAPTER_LORA_ALPHA));
- }
- int n_tensors = gguf_get_n_tensors(ctx_gguf);
- // contexts for each buffer type
- std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
- auto get_ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
- auto it = ctx_map.find(buft);
- if (it == ctx_map.end()) {
- // add a new context
- struct ggml_init_params params = {
- /*.mem_size =*/ n_tensors*ggml_tensor_overhead(),
- /*.mem_buffer =*/ NULL,
- /*.no_alloc =*/ true,
- };
- ggml_context * buft_ctx = ggml_init(params);
- ctx_map[buft] = buft_ctx;
- return buft_ctx;
- }
- return it->second;
- };
- // bundle lora_a and lora_b into pairs
- std::map<std::string, llama_lora_weight> ab_map;
- auto str_endswith = [](const std::string & str, const std::string & suffix) {
- return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
- };
- for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
- std::string name(cur->name);
- if (str_endswith(name, ".lora_a")) {
- replace_all(name, ".lora_a", "");
- if (ab_map.find(name) == ab_map.end()) {
- ab_map[name] = llama_lora_weight(cur, nullptr);
- } else {
- ab_map[name].a = cur;
- }
- } else if (str_endswith(name, ".lora_b")) {
- replace_all(name, ".lora_b", "");
- if (ab_map.find(name) == ab_map.end()) {
- ab_map[name] = llama_lora_weight(nullptr, cur);
- } else {
- ab_map[name].b = cur;
- }
- } else {
- gguf_free(ctx_gguf);
- ggml_free(ctx);
- throw std::runtime_error("LoRA tensor '" + name + "' has unexpected suffix");
- }
- }
- // add tensors
- for (auto & it : ab_map) {
- const std::string & name = it.first;
- llama_lora_weight & w = it.second;
- if (!w.a || !w.b) {
- gguf_free(ctx_gguf);
- ggml_free(ctx);
- throw std::runtime_error("LoRA tensor pair for '" + name + "' is missing one component");
- }
- // device buft and device ctx
- auto * model_tensor = llama_get_model_tensor(model, name.c_str());
- if (!model_tensor) {
- gguf_free(ctx_gguf);
- ggml_free(ctx);
- throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model");
- }
- struct ggml_context * dev_ctx = get_ctx_for_buft(ggml_backend_buffer_get_type(model_tensor->buffer));
- // validate tensor shape
- if (model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) {
- gguf_free(ctx_gguf);
- ggml_free(ctx);
- throw std::runtime_error("tensor '" + name + "' has incorrect shape");
- }
- if (w.a->ne[1] != w.b->ne[0]) {
- gguf_free(ctx_gguf);
- ggml_free(ctx);
- throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)");
- }
- // save tensor to adapter
- struct ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a);
- struct ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b);
- ggml_set_name(tensor_a, w.a->name);
- ggml_set_name(tensor_b, w.b->name);
- adapter.ab_map[name] = llama_lora_weight(tensor_a, tensor_b);
- }
- // allocate tensors / buffers and zero
- {
- adapter.ctxs.reserve(ctx_map.size());
- adapter.bufs.reserve(ctx_map.size());
- for (auto it : ctx_map) {
- ggml_backend_buffer_type_t buft = it.first;
- ggml_context * ctx_dev = it.second;
- ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx_dev, buft);
- if (!buf) {
- gguf_free(ctx_gguf);
- ggml_free(ctx);
- throw std::runtime_error("failed to allocate buffer for lora adapter\n");
- }
- LLAMA_LOG_INFO("%s: %10s LoRA buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
- adapter.ctxs.push_back(ctx_dev);
- adapter.bufs.push_back(buf);
- }
- }
- // set tensor data
- {
- llama_file gguf_file(path_lora, "rb");
- std::vector<uint8_t> read_buf;
- auto set_tensor = [&](struct ggml_tensor * orig, struct ggml_tensor * dev) {
- size_t offs = gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, gguf_find_tensor(ctx_gguf, orig->name));
- size_t size = ggml_nbytes(orig);
- read_buf.resize(size);
- gguf_file.seek(offs, SEEK_SET);
- gguf_file.read_raw(read_buf.data(), size);
- ggml_backend_tensor_set(dev, read_buf.data(), 0, size);
- };
- for (auto & it : adapter.ab_map) {
- auto orig = ab_map[it.first];
- auto dev = it.second;
- set_tensor(orig.a, dev.a);
- set_tensor(orig.b, dev.b);
- }
- }
- LLAMA_LOG_INFO("%s: loaded %ld tensors from lora file\n", __func__, adapter.ab_map.size()*2);
- // free ctx for reading gguf
- gguf_free(ctx_gguf);
- ggml_free(ctx);
- }
- int32_t llama_lora_adapter_set(
- struct llama_context * ctx,
- struct llama_lora_adapter * adapter,
- float scale) {
- if (ctx->cparams.flash_attn) {
- LLAMA_LOG_ERROR("%s: flash_attn is not compatible with LoRA\n", __func__);
- return -1;
- }
- ctx->lora_adapters[adapter] = scale;
- return 0;
- }
- int32_t llama_lora_adapter_remove(
- struct llama_context * ctx,
- struct llama_lora_adapter * adapter) {
- auto pos = ctx->lora_adapters.find(adapter);
- if (pos != ctx->lora_adapters.end()) {
- ctx->lora_adapters.erase(pos);
- return 0;
- }
- return -1;
- }
- void llama_lora_adapter_clear(struct llama_context * ctx) {
- ctx->lora_adapters.clear();
- }
- void llama_lora_adapter_free(struct llama_lora_adapter * adapter) {
- delete adapter;
- }
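- // Illustrative adapter lifecycle, assuming a loaded model and context (the file name is a placeholder):
- //     llama_lora_adapter * adapter = llama_lora_adapter_init(model, "adapter.gguf");
- //     llama_lora_adapter_set(ctx, adapter, 1.0f);  // apply with scale 1.0
- //     llama_lora_adapter_remove(ctx, adapter);     // or llama_lora_adapter_clear(ctx)
- //     llama_lora_adapter_free(adapter);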
- //
- // interface implementation
- //
- struct llama_model_params llama_model_default_params() {
- struct llama_model_params result = {
- /*.n_gpu_layers =*/ 0,
- /*.split_mode =*/ LLAMA_SPLIT_MODE_LAYER,
- /*.main_gpu =*/ 0,
- /*.tensor_split =*/ nullptr,
- /*.rpc_servers =*/ nullptr,
- /*.progress_callback =*/ nullptr,
- /*.progress_callback_user_data =*/ nullptr,
- /*.kv_overrides =*/ nullptr,
- /*.vocab_only =*/ false,
- /*.use_mmap =*/ true,
- /*.use_mlock =*/ false,
- /*.check_tensors =*/ false,
- };
- #ifdef GGML_USE_METAL
- // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
- result.n_gpu_layers = 999;
- #endif
- return result;
- }
- struct llama_context_params llama_context_default_params() {
- struct llama_context_params result = {
- /*.n_ctx =*/ 512,
- /*.n_batch =*/ 2048,
- /*.n_ubatch =*/ 512,
- /*.n_seq_max =*/ 1,
- /*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default
- /*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS,
- /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED,
- /*.pooling_type =*/ LLAMA_POOLING_TYPE_UNSPECIFIED,
- /*.attention_type =*/ LLAMA_ATTENTION_TYPE_UNSPECIFIED,
- /*.rope_freq_base =*/ 0.0f,
- /*.rope_freq_scale =*/ 0.0f,
- /*.yarn_ext_factor =*/ -1.0f,
- /*.yarn_attn_factor =*/ 1.0f,
- /*.yarn_beta_fast =*/ 32.0f,
- /*.yarn_beta_slow =*/ 1.0f,
- /*.yarn_orig_ctx =*/ 0,
- /*.defrag_thold =*/ -1.0f,
- /*.cb_eval =*/ nullptr,
- /*.cb_eval_user_data =*/ nullptr,
- /*.type_k =*/ GGML_TYPE_F16,
- /*.type_v =*/ GGML_TYPE_F16,
- /*.logits_all =*/ false,
- /*.embeddings =*/ false,
- /*.offload_kqv =*/ true,
- /*.flash_attn =*/ false,
- /*.no_perf =*/ true,
- /*.abort_callback =*/ nullptr,
- /*.abort_callback_data =*/ nullptr,
- };
- return result;
- }
- struct llama_sampler_chain_params llama_sampler_chain_default_params() {
- struct llama_sampler_chain_params result = {
- /*.no_perf =*/ true,
- };
- return result;
- }
- struct llama_model_quantize_params llama_model_quantize_default_params() {
- struct llama_model_quantize_params result = {
- /*.nthread =*/ 0,
- /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
- /*.output_tensor_type =*/ GGML_TYPE_COUNT,
- /*.token_embedding_type =*/ GGML_TYPE_COUNT,
- /*.allow_requantize =*/ false,
- /*.quantize_output_tensor =*/ true,
- /*.only_copy =*/ false,
- /*.pure =*/ false,
- /*.keep_split =*/ false,
- /*.imatrix =*/ nullptr,
- /*.kv_overrides =*/ nullptr,
- };
- return result;
- }
- size_t llama_max_devices(void) {
- #if defined(GGML_USE_RPC)
- return GGML_RPC_MAX_SERVERS;
- #elif defined(GGML_USE_METAL)
- return 1;
- #elif defined(GGML_USE_CUDA)
- return GGML_CUDA_MAX_DEVICES;
- #elif defined(GGML_USE_SYCL)
- return GGML_SYCL_MAX_DEVICES;
- #elif defined(GGML_USE_VULKAN)
- return GGML_VK_MAX_DEVICES;
- #elif defined(GGML_USE_CANN)
- return GGML_CANN_MAX_DEVICES;
- #else
- return 1;
- #endif
- }
- bool llama_supports_mmap(void) {
- return llama_mmap::SUPPORTED;
- }
- bool llama_supports_mlock(void) {
- return llama_mlock::SUPPORTED;
- }
- bool llama_supports_gpu_offload(void) {
- #if defined(GGML_USE_CUDA) || defined(GGML_USE_METAL) || defined(GGML_USE_VULKAN) || \
- defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE) || defined(GGML_USE_RPC)
- // Defined when llama.cpp is compiled with support for offloading model layers to GPU.
- return true;
- #else
- return false;
- #endif
- }
- void llama_backend_init(void) {
- ggml_time_init();
- // needed to initialize f16 tables
- {
- struct ggml_init_params params = { 0, NULL, false };
- struct ggml_context * ctx = ggml_init(params);
- ggml_free(ctx);
- }
- }
- void llama_numa_init(enum ggml_numa_strategy numa) {
- if (numa != GGML_NUMA_STRATEGY_DISABLED) {
- ggml_numa_init(numa);
- }
- }
- void llama_attach_threadpool(
- struct llama_context * ctx,
- ggml_threadpool_t threadpool,
- ggml_threadpool_t threadpool_batch) {
- ctx->threadpool = threadpool;
- ctx->threadpool_batch = threadpool_batch ? threadpool_batch : threadpool;
- }
- void llama_detach_threadpool(struct llama_context * ctx) {
- ctx->threadpool = nullptr;
- ctx->threadpool_batch = nullptr;
- }
- void llama_backend_free(void) {
- ggml_quantize_free();
- }
- int64_t llama_time_us(void) {
- return ggml_time_us();
- }
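- // Illustrative load/teardown sequence (the model path and parameter values are placeholders):
- //     llama_backend_init();
- //     llama_model_params mparams = llama_model_default_params();
- //     llama_model * model = llama_load_model_from_file("model.gguf", mparams);
- //     llama_context_params cparams = llama_context_default_params();
- //     llama_context * lctx = llama_new_context_with_model(model, cparams);
- //     // ... run inference ...
- //     llama_free(lctx);
- //     llama_free_model(model);
- //     llama_backend_free();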
- struct llama_model * llama_load_model_from_file(
- const char * path_model,
- struct llama_model_params params) {
- ggml_time_init();
- llama_model * model = new llama_model;
- unsigned cur_percentage = 0;
- if (params.progress_callback == NULL) {
- params.progress_callback_user_data = &cur_percentage;
- params.progress_callback = [](float progress, void * ctx) {
- unsigned * cur_percentage_p = (unsigned *) ctx;
- unsigned percentage = (unsigned) (100 * progress);
- while (percentage > *cur_percentage_p) {
- *cur_percentage_p = percentage;
- LLAMA_LOG_CONT(".");
- if (percentage >= 100) {
- LLAMA_LOG_CONT("\n");
- }
- }
- return true;
- };
- }
- if (params.rpc_servers != nullptr && params.rpc_servers[0] != '\0') {
- // split the comma-separated list of servers and add them to model->rpc_servers
- std::string servers(params.rpc_servers);
- size_t pos = 0;
- while ((pos = servers.find(",")) != std::string::npos) {
- std::string server = servers.substr(0, pos);
- model->rpc_servers.push_back(server);
- servers.erase(0, pos + 1);
- }
- model->rpc_servers.push_back(servers);
- }
- int status = llama_model_load(path_model, *model, params);
- GGML_ASSERT(status <= 0);
- if (status < 0) {
- if (status == -1) {
- LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
- } else if (status == -2) {
- LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
- }
- delete model;
- return nullptr;
- }
- return model;
- }
- void llama_free_model(struct llama_model * model) {
- delete model;
- }
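- // Create a context for an already-loaded model: validate the parameters, derive the effective
- // cparams (context size, batch sizes, RoPE/YaRN settings, pooling), then initialize the compute
- // backends, the KV cache, the output buffer and the backend scheduler, reserving compute
- // buffers against a worst-case graph.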
- struct llama_context * llama_new_context_with_model(
- struct llama_model * model,
- struct llama_context_params params) {
- if (!model) {
- LLAMA_LOG_ERROR("%s: model cannot be NULL\n", __func__);
- return nullptr;
- }
- if (params.n_batch == 0 && params.n_ubatch == 0) {
- LLAMA_LOG_ERROR("%s: n_batch and n_ubatch cannot both be zero\n", __func__);
- return nullptr;
- }
- if (params.n_ctx == 0 && model->hparams.n_ctx_train == 0) {
- LLAMA_LOG_ERROR("%s: n_ctx and model->hparams.n_ctx_train cannot both be zero\n", __func__);
- return nullptr;
- }
- if (params.flash_attn && model->arch == LLM_ARCH_GROK) {
- LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__);
- params.flash_attn = false;
- }
- if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
- LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
- params.flash_attn = false;
- }
- if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
- LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
- return nullptr;
- }
- llama_context * ctx = new llama_context(*model);
- const auto & hparams = model->hparams;
- auto & cparams = ctx->cparams;
- cparams.n_seq_max = std::max(1u, params.n_seq_max);
- cparams.n_threads = params.n_threads;
- cparams.n_threads_batch = params.n_threads_batch;
- cparams.yarn_ext_factor = params.yarn_ext_factor;
- cparams.yarn_attn_factor = params.yarn_attn_factor;
- cparams.yarn_beta_fast = params.yarn_beta_fast;
- cparams.yarn_beta_slow = params.yarn_beta_slow;
- cparams.defrag_thold = params.defrag_thold;
- cparams.embeddings = params.embeddings;
- cparams.offload_kqv = params.offload_kqv;
- cparams.flash_attn = params.flash_attn;
- cparams.no_perf = params.no_perf;
- cparams.pooling_type = params.pooling_type;
- cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx;
- cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base;
- cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
- // this is necessary due to kv_self.n being padded later during inference
- cparams.n_ctx = GGML_PAD(cparams.n_ctx, llama_kv_cache_get_padding(cparams));
- // with causal attention, the batch size is limited by the context size
- cparams.n_batch = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;
- // the batch has to be at least GGML_KQ_MASK_PAD because we will be padding the KQ_mask
- // this is required by GPU kernels in order to avoid out-of-bounds accesses (e.g. ggml_flash_attn_ext)
- // ref: https://github.com/ggerganov/llama.cpp/pull/5021
- if (cparams.n_batch < GGML_KQ_MASK_PAD) {
- LLAMA_LOG_WARN("%s: n_batch is less than GGML_KQ_MASK_PAD - increasing to %d\n", __func__, GGML_KQ_MASK_PAD);
- cparams.n_batch = GGML_KQ_MASK_PAD;
- }
- cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);
- cparams.n_ctx_orig_yarn = params.yarn_orig_ctx != 0 ? params.yarn_orig_ctx :
- hparams.n_ctx_orig_yarn != 0 ? hparams.n_ctx_orig_yarn :
- hparams.n_ctx_train;
- cparams.cb_eval = params.cb_eval;
- cparams.cb_eval_user_data = params.cb_eval_user_data;
- auto rope_scaling_type = params.rope_scaling_type;
- if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED) {
- rope_scaling_type = hparams.rope_scaling_type_train;
- }
- if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_NONE) {
- cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none
- }
- if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set'
- cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_YARN ? 1.0f : 0.0f;
- }
- cparams.yarn_attn_factor *= hparams.rope_attn_factor;
- if (cparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {
- if (hparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {
- cparams.pooling_type = LLAMA_POOLING_TYPE_NONE;
- } else {
- cparams.pooling_type = hparams.pooling_type;
- }
- }
- if (params.attention_type == LLAMA_ATTENTION_TYPE_UNSPECIFIED) {
- cparams.causal_attn = hparams.causal_attn;
- } else {
- cparams.causal_attn = params.attention_type == LLAMA_ATTENTION_TYPE_CAUSAL;
- }
- LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, cparams.n_ctx);
- LLAMA_LOG_INFO("%s: n_batch = %u\n", __func__, cparams.n_batch);
- LLAMA_LOG_INFO("%s: n_ubatch = %u\n", __func__, cparams.n_ubatch);
- LLAMA_LOG_INFO("%s: flash_attn = %d\n", __func__, cparams.flash_attn);
- LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
- LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
- ctx->abort_callback = params.abort_callback;
- ctx->abort_callback_data = params.abort_callback_data;
- ctx->logits_all = params.logits_all;
- // build worst-case graph for encoder if a model contains encoder
- ctx->is_encoding = llama_model_has_encoder(model);
- uint32_t kv_size = cparams.n_ctx;
- ggml_type type_k = params.type_k;
- ggml_type type_v = params.type_v;
- // Mamba only needs a constant number of KV cache cells per sequence
- if (llama_model_is_recurrent(model)) {
- // Mamba needs at least as many KV cells as there are sequences kept at any time
- kv_size = std::max((uint32_t) 1, params.n_seq_max);
- // it's probably best to keep as much precision as possible for the states
- type_k = GGML_TYPE_F32; // required by ggml_ssm_conv for Mamba's conv_states
- type_v = GGML_TYPE_F32; // required by ggml_ssm_scan for Mamba's ssm_states
- }
- GGML_ASSERT(hparams.n_embd_head_k % ggml_blck_size(type_k) == 0);
- GGML_ASSERT(hparams.n_embd_head_v % ggml_blck_size(type_v) == 0);
- if (!hparams.vocab_only) {
- // initialize backends
- #if defined(GGML_USE_RPC)
- if (model->n_gpu_layers > 0) {
- for (const auto & endpoint : model->rpc_servers) {
- ggml_backend_t backend = ggml_backend_rpc_init(endpoint.c_str());
- if (backend == nullptr) {
- LLAMA_LOG_ERROR("%s: failed to initialize RPC to '%s'\n", __func__, endpoint.c_str());
- llama_free(ctx);
- return nullptr;
- }
- ctx->backends.push_back(backend);
- }
- }
- #endif
- #if defined(GGML_USE_METAL)
- if (model->n_gpu_layers > 0) {
- ctx->backend_metal = ggml_backend_metal_init();
- if (ctx->backend_metal == nullptr) {
- LLAMA_LOG_ERROR("%s: failed to initialize Metal backend\n", __func__);
- llama_free(ctx);
- return nullptr;
- }
- ctx->backends.push_back(ctx->backend_metal);
- }
- #elif defined(GGML_USE_CUDA)
- if (model->split_mode == LLAMA_SPLIT_MODE_NONE || model->split_mode == LLAMA_SPLIT_MODE_ROW) {
- // with split_mode LLAMA_SPLIT_MODE_NONE or LLAMA_SPLIT_MODE_ROW, only the main GPU backend is used
- ggml_backend_t backend = ggml_backend_cuda_init(model->main_gpu);
- if (backend == nullptr) {
- LLAMA_LOG_ERROR("%s: failed to initialize CUDA%d backend\n", __func__, model->main_gpu);
- llama_free(ctx);
- return nullptr;
- }
- ctx->backends.push_back(backend);
- } else {
- // LLAMA_SPLIT_MODE_LAYER requires a backend for each GPU
- for (int device = 0; device < ggml_backend_cuda_get_device_count(); ++device) {
- ggml_backend_t backend = ggml_backend_cuda_init(device);
- if (backend == nullptr) {
- LLAMA_LOG_ERROR("%s: failed to initialize CUDA%d backend\n", __func__, device);
- llama_free(ctx);
- return nullptr;
- }
- ctx->backends.push_back(backend);
- }
- }
- #elif defined(GGML_USE_VULKAN)
- if (model->split_mode == LLAMA_SPLIT_MODE_ROW) {
- LLAMA_LOG_ERROR("%s: Row split not supported. Failed to initialize Vulkan backend\n", __func__);
- llama_free(ctx);
- return nullptr;
- }
- if (model->split_mode == LLAMA_SPLIT_MODE_NONE) {
- ggml_backend_t backend = ggml_backend_vk_init(model->main_gpu);
- if (backend == nullptr) {
- LLAMA_LOG_ERROR("%s: failed to initialize Vulkan backend\n", __func__);
- llama_free(ctx);
- return nullptr;
- }
- ctx->backends.push_back(backend);
- } else {
- for (int device = 0; device < ggml_backend_vk_get_device_count(); ++device) {
- ggml_backend_t backend = ggml_backend_vk_init(device);
- if (backend == nullptr) {
- LLAMA_LOG_ERROR("%s: failed to initialize Vulkan%d backend\n", __func__, device);
- llama_free(ctx);
- return nullptr;
- }
- ctx->backends.push_back(backend);
- }
- }
- #elif defined(GGML_USE_SYCL)
- // with split_mode LLAMA_SPLIT_MODE_NONE or LLAMA_SPLIT_MODE_ROW, only the main GPU backend is used
- if (model->split_mode == LLAMA_SPLIT_MODE_NONE || model->split_mode == LLAMA_SPLIT_MODE_ROW) {
- ggml_backend_t backend = ggml_backend_sycl_init(model->main_gpu);
- if (backend == nullptr) {
- LLAMA_LOG_ERROR("%s: failed to initialize SYCL%d backend\n", __func__, model->main_gpu);
- llama_free(ctx);
- return nullptr;
- }
- ctx->backends.push_back(backend);
- } else {
- // LLAMA_SPLIT_LAYER requires a backend for each GPU
- for (int i = 0; i < ggml_backend_sycl_get_device_count(); ++i) {
- ggml_backend_t backend = ggml_backend_sycl_init(i);
- if (backend == nullptr) {
- LLAMA_LOG_ERROR("%s: failed to initialize SYCL%d for No.%d backend\n", __func__, i, i);
- llama_free(ctx);
- return nullptr;
- }
- ctx->backends.push_back(backend);
- }
- }
- #elif defined(GGML_USE_KOMPUTE)
- if (model->n_gpu_layers > 0) {
- auto * backend = ggml_backend_kompute_init(model->main_gpu);
- if (backend == nullptr) {
- LLAMA_LOG_ERROR("%s: failed to initialize Kompute backend\n", __func__);
- llama_free(ctx);
- return nullptr;
- }
- ctx->backends.push_back(backend);
- }
- #elif defined(GGML_USE_CANN)
- // with split_mode LLAMA_SPLIT_MODE_NONE or LLAMA_SPLIT_MODE_ROW, only the main GPU backend is used
- // TODO: ggml_backend_cann does not support split tensors yet, so just leave the code here.
- if (model->split_mode == LLAMA_SPLIT_MODE_NONE || model->split_mode == LLAMA_SPLIT_MODE_ROW) {
- ggml_backend_t backend = ggml_backend_cann_init(model->main_gpu);
- if (backend == nullptr) {
- LLAMA_LOG_ERROR("%s: failed to initialize CANN%d backend\n", __func__, model->main_gpu);
- llama_free(ctx);
- return nullptr;
- }
- ctx->backends.push_back(backend);
- } else {
- // LLAMA_SPLIT_MODE_LAYER requires a backend for each GPU
- // TODO: currently, CANN cannot use multiple GPUs; keep this code here for future CANN versions.
- for (int32_t device = 0; device < ggml_backend_cann_get_device_count(); ++device) {
- ggml_backend_t backend = ggml_backend_cann_init(device);
- if (backend == nullptr) {
- LLAMA_LOG_ERROR("%s: failed to initialize CANN%d backend\n", __func__, device);
- llama_free(ctx);
- return nullptr;
- }
- ctx->backends.push_back(backend);
- }
- }
- #endif
- #ifdef GGML_USE_BLAS
- ctx->backend_blas = ggml_backend_blas_init();
- if (ctx->backend_blas == nullptr) {
- LLAMA_LOG_WARN("%s: failed to initialize BLAS backend\n", __func__);
- } else {
- ctx->backends.push_back(ctx->backend_blas);
- }
- #endif
- ctx->backend_cpu = ggml_backend_cpu_init();
- if (ctx->backend_cpu == nullptr) {
- LLAMA_LOG_ERROR("%s: failed to initialize CPU backend\n", __func__);
- llama_free(ctx);
- return nullptr;
- }
- ctx->backends.push_back(ctx->backend_cpu);
- if (!llama_kv_cache_init(ctx->kv_self, ctx, type_k, type_v, kv_size, cparams.offload_kqv)) {
- LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__);
- llama_free(ctx);
- return nullptr;
- }
- {
- size_t memory_size_k = 0;
- size_t memory_size_v = 0;
- for (auto & k : ctx->kv_self.k_l) {
- memory_size_k += ggml_nbytes(k);
- }
- for (auto & v : ctx->kv_self.v_l) {
- memory_size_v += ggml_nbytes(v);
- }
- LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
- (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
- ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
- ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
- }
- // graph outputs buffer
- {
- // resized during inference when a batch uses more outputs
- if (llama_output_reserve(*ctx, params.n_seq_max) < params.n_seq_max) {
- LLAMA_LOG_ERROR("%s: failed to reserve initial output buffer\n", __func__);
- llama_free(ctx);
- return nullptr;
- }
- LLAMA_LOG_INFO("%s: %10s output buffer size = %8.2f MiB\n", __func__,
- ggml_backend_buffer_name(ctx->buf_output),
- ggml_backend_buffer_get_size(ctx->buf_output) / 1024.0 / 1024.0);
- }
- // scheduler and compute buffers
- {
- // buffer types used for the compute buffer of each backend
- std::vector<ggml_backend_buffer_type_t> backend_buft;
- for (auto * backend : ctx->backends) {
- if (ggml_backend_is_cpu(backend)) {
- // use host buffers for the CPU backend compute buffer
- backend_buft.push_back(llama_default_buffer_type_cpu(true));
- } else {
- backend_buft.push_back(ggml_backend_get_default_buffer_type(backend));
- }
- }
- const size_t max_nodes = llama_model_max_nodes(*model);
- // buffer used to store the computation graph and the tensor meta data
- ctx->buf_compute_meta.resize(ggml_tensor_overhead()*max_nodes + ggml_graph_overhead_custom(max_nodes, false));
- // enabling pipeline parallelism in the scheduler increases memory usage, so it is only done when necessary
- bool pipeline_parallel =
- llama_get_device_count(*model) > 1 &&
- model->n_gpu_layers > (int)model->hparams.n_layer &&
- model->split_mode == LLAMA_SPLIT_MODE_LAYER &&
- params.offload_kqv;
- #ifndef GGML_USE_CUDA
- // pipeline parallelism requires support for async compute and events
- // currently this is only implemented in the CUDA backend
- pipeline_parallel = false;
- #endif
- ctx->sched = ggml_backend_sched_new(ctx->backends.data(), backend_buft.data(), ctx->backends.size(), max_nodes, pipeline_parallel);
- if (pipeline_parallel) {
- LLAMA_LOG_INFO("%s: pipeline parallelism enabled (n_copies=%d)\n", __func__, ggml_backend_sched_get_n_copies(ctx->sched));
- }
- // build worst-case graph
- uint32_t n_seqs = 1; // TODO: worst-case number of sequences
- uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);
- llama_token token = llama_token_bos(&ctx->model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
- llama_ubatch ubatch = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr};
- ggml_cgraph * gf = llama_build_graph(*ctx, ubatch, true);
- // initialize scheduler with the worst-case graph
- if (!ggml_backend_sched_reserve(ctx->sched, gf)) {
- LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__);
- llama_free(ctx);
- return nullptr;
- }
- for (size_t i = 0; i < ctx->backends.size(); i++) {
- ggml_backend_t backend = ctx->backends[i];
- ggml_backend_buffer_type_t buft = backend_buft[i];
- size_t size = ggml_backend_sched_get_buffer_size(ctx->sched, backend);
- if (size > 1) {
- LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
- ggml_backend_buft_name(buft),
- size / 1024.0 / 1024.0);
- }
- }
- // note: the number of splits during measure is higher than during inference due to the kv shift
- int n_splits = ggml_backend_sched_get_n_splits(ctx->sched);
- LLAMA_LOG_INFO("%s: graph nodes = %d\n", __func__, ggml_graph_n_nodes(gf));
- LLAMA_LOG_INFO("%s: graph splits = %d\n", __func__, n_splits);
- }
- }
- return ctx;
- }
- void llama_free(struct llama_context * ctx) {
- delete ctx;
- }
- uint32_t llama_n_ctx(const struct llama_context * ctx) {
- return ctx->cparams.n_ctx;
- }
- uint32_t llama_n_batch(const struct llama_context * ctx) {
- return ctx->cparams.n_batch;
- }
- uint32_t llama_n_ubatch(const struct llama_context * ctx) {
- return ctx->cparams.n_ubatch;
- }
- uint32_t llama_n_seq_max(const struct llama_context * ctx) {
- return ctx->kv_self.size;
- }
- enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
- return model->vocab.type;
- }
- int32_t llama_n_vocab(const struct llama_model * model) {
- return model->hparams.n_vocab;
- }
- int32_t llama_n_ctx_train(const struct llama_model * model) {
- return model->hparams.n_ctx_train;
- }
- int32_t llama_n_embd(const struct llama_model * model) {
- return model->hparams.n_embd;
- }
- int32_t llama_n_layer(const struct llama_model * model) {
- return model->hparams.n_layer;
- }
- int32_t llama_n_head(const struct llama_model * model) {
- return model->hparams.n_head();
- }
- const struct llama_model * llama_get_model(const struct llama_context * ctx) {
- return &ctx->model;
- }
- enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx) {
- return ctx->cparams.pooling_type;
- }
- enum llama_rope_type llama_rope_type(const struct llama_model * model) {
- switch (model->arch) {
- // these models do not use RoPE
- case LLM_ARCH_GPT2:
- case LLM_ARCH_GPTJ:
- case LLM_ARCH_MPT:
- case LLM_ARCH_REFACT:
- case LLM_ARCH_BLOOM:
- case LLM_ARCH_MAMBA:
- case LLM_ARCH_JINA_BERT_V2:
- case LLM_ARCH_T5:
- case LLM_ARCH_T5ENCODER:
- case LLM_ARCH_JAIS:
- case LLM_ARCH_RWKV6:
- return LLAMA_ROPE_TYPE_NONE;
- // use what we call a normal RoPE, operating on pairs of consecutive head values
- case LLM_ARCH_LLAMA:
- case LLM_ARCH_MLLAMA:
- case LLM_ARCH_BAICHUAN:
- case LLM_ARCH_STARCODER:
- case LLM_ARCH_PLAMO:
- case LLM_ARCH_ORION:
- case LLM_ARCH_INTERNLM2:
- case LLM_ARCH_MINICPM:
- case LLM_ARCH_XVERSE:
- case LLM_ARCH_COMMAND_R:
- case LLM_ARCH_OLMO:
- case LLM_ARCH_ARCTIC:
- case LLM_ARCH_DEEPSEEK2:
- case LLM_ARCH_CHATGLM:
- case LLM_ARCH_GRANITE:
- case LLM_ARCH_GRANITE_MOE:
- case LLM_ARCH_CHAMELEON:
- case LLM_ARCH_SOLAR:
- return LLAMA_ROPE_TYPE_NORM;
- // the pairs of head values are offset by n_rot/2
- case LLM_ARCH_FALCON:
- case LLM_ARCH_GROK:
- case LLM_ARCH_DBRX:
- case LLM_ARCH_BERT:
- case LLM_ARCH_NOMIC_BERT:
- case LLM_ARCH_STABLELM:
- case LLM_ARCH_BITNET:
- case LLM_ARCH_QWEN:
- case LLM_ARCH_QWEN2:
- case LLM_ARCH_QWEN2MOE:
- case LLM_ARCH_OLMOE:
- case LLM_ARCH_PHI2:
- case LLM_ARCH_PHI3:
- case LLM_ARCH_GEMMA:
- case LLM_ARCH_GEMMA2:
- case LLM_ARCH_STARCODER2:
- case LLM_ARCH_OPENELM:
- case LLM_ARCH_GPTNEOX:
- case LLM_ARCH_CODESHELL:
- case LLM_ARCH_NEMOTRON:
- case LLM_ARCH_EXAONE:
- case LLM_ARCH_MINICPM3:
- return LLAMA_ROPE_TYPE_NEOX;
- // all model arches should be listed explicitly here
- case LLM_ARCH_UNKNOWN:
- GGML_ABORT("unknown architecture");
- }
- return LLAMA_ROPE_TYPE_NONE;
- }
- float llama_rope_freq_scale_train(const struct llama_model * model) {
- return model->hparams.rope_freq_scale_train;
- }
- int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) {
- const auto & it = model->gguf_kv.find(key);
- if (it == model->gguf_kv.end()) {
- if (buf_size > 0) {
- buf[0] = '\0';
- }
- return -1;
- }
- return snprintf(buf, buf_size, "%s", it->second.c_str());
- }
- int32_t llama_model_meta_count(const struct llama_model * model) {
- return (int)model->gguf_kv.size();
- }
- int32_t llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
- if (i < 0 || i >= (int)model->gguf_kv.size()) {
- if (buf_size > 0) {
- buf[0] = '\0';
- }
- return -1;
- }
- auto it = model->gguf_kv.begin();
- std::advance(it, i);
- return snprintf(buf, buf_size, "%s", it->first.c_str());
- }
- int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size) {
- if (i < 0 || i >= (int)model->gguf_kv.size()) {
- if (buf_size > 0) {
- buf[0] = '\0';
- }
- return -1;
- }
- auto it = model->gguf_kv.begin();
- std::advance(it, i);
- return snprintf(buf, buf_size, "%s", it->second.c_str());
- }
- int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
- return snprintf(buf, buf_size, "%s %s %s",
- llama_model_arch_name(model->arch),
- llama_model_type_name(model->type),
- llama_model_ftype_name(model->ftype).c_str());
- }
- uint64_t llama_model_size(const struct llama_model * model) {
- uint64_t size = 0;
- for (const auto & it : model->tensors_by_name) {
- size += ggml_nbytes(it.second);
- }
- return size;
- }
- uint64_t llama_model_n_params(const struct llama_model * model) {
- uint64_t nparams = 0;
- for (const auto & it : model->tensors_by_name) {
- nparams += ggml_nelements(it.second);
- }
- return nparams;
- }
- struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name) {
- auto it = std::find_if(model->tensors_by_name.begin(), model->tensors_by_name.end(),
- [name](const std::pair<std::string, struct ggml_tensor *> & it) {
- return it.first == name;
- });
- if (it == model->tensors_by_name.end()) {
- return nullptr;
- }
- return it->second;
- }
- bool llama_model_has_encoder(const struct llama_model * model) {
- switch (model->arch) {
- case LLM_ARCH_T5: return true;
- case LLM_ARCH_T5ENCODER: return true;
- default: return false;
- }
- }
- bool llama_model_has_decoder(const struct llama_model * model) {
- switch (model->arch) {
- case LLM_ARCH_T5ENCODER: return false;
- default: return true;
- }
- }
- llama_token llama_model_decoder_start_token(const struct llama_model * model) {
- return model->hparams.dec_start_token_id;
- }
- bool llama_model_is_recurrent(const struct llama_model * model) {
- switch (model->arch) {
- case LLM_ARCH_MAMBA: return true;
- case LLM_ARCH_RWKV6: return true;
- default: return false;
- }
- }
- uint32_t llama_model_quantize(
- const char * fname_inp,
- const char * fname_out,
- const llama_model_quantize_params * params) {
- try {
- llama_model_quantize_internal(fname_inp, fname_out, params);
- return 0;
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
- return 1;
- }
- }
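- // Illustrative quantization call (file names are placeholders):
- //     llama_model_quantize_params qparams = llama_model_quantize_default_params();
- //     qparams.ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M;
- //     llama_model_quantize("model-f16.gguf", "model-q4_k_m.gguf", &qparams);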
- struct llama_lora_adapter * llama_lora_adapter_init(struct llama_model * model, const char * path_lora) {
- try {
- struct llama_lora_adapter * adapter = new llama_lora_adapter(model);
- llama_lora_adapter_init_internal(model, path_lora, *adapter);
- return adapter;
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
- return nullptr;
- }
- }
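- // Allocate the control vector storage: one F32 tensor of size n_embd per layer (layer 0 is
- // never used, so its slot stays null), grouped into contexts by each layer's buffer type.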
- static bool llama_control_vector_init(struct llama_control_vector & cvec, const llama_model & model) {
- GGML_ASSERT(cvec.tensors.empty());
- GGML_ASSERT(cvec.ctxs.empty());
- GGML_ASSERT(cvec.bufs.empty());
- // count layer buffer types
- std::map<ggml_backend_buffer_type_t, int> buft_layer_count;
- for (int64_t i = 0; i < model.hparams.n_layer; i++) {
- buft_layer_count[model.buft_layer[i].buft]++;
- }
- // allocate contexts
- std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
- for (auto & it : buft_layer_count) {
- int n_layers = it.second;
- struct ggml_init_params params = {
- /*.mem_size =*/ n_layers * ggml_tensor_overhead(),
- /*.mem_buffer =*/ NULL,
- /*.no_alloc =*/ true,
- };
- ggml_context * ctx = ggml_init(params);
- if (!ctx) {
- LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__);
- return false;
- }
- ctx_map[it.first] = ctx;
- }
- // make tensors
- cvec.tensors.reserve(model.hparams.n_layer);
- cvec.tensors.push_back(nullptr); // there's never a tensor for layer 0
- for (size_t il = 1; il < model.hparams.n_layer; il++) {
- struct ggml_context * ctx = ctx_map.at(model.buft_layer[il].buft);
- ggml_tensor * tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd);
- cvec.tensors.push_back(tensor);
- }
- // allocate tensors / buffers and zero
- cvec.ctxs.reserve(ctx_map.size());
- cvec.bufs.reserve(ctx_map.size());
- for (auto it : ctx_map) {
- ggml_backend_buffer_type_t buft = it.first;
- ggml_context * ctx = it.second;
- ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
- if (!buf) {
- LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__);
- return false;
- }
- ggml_backend_buffer_clear(buf, 0);
- cvec.ctxs.push_back(ctx);
- cvec.bufs.push_back(buf);
- }
- return true;
- }
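- // apply a control vector given as a flat buffer of one n_embd row per layer, starting at layer 1;
- // row (il - 1) is copied into the tensor for layer il, and rows beyond len are left untouched;
- // passing data == nullptr disables the currently loaded control vector without freeing it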
- int32_t llama_control_vector_apply(struct llama_context * lctx, const float * data, size_t len, int32_t n_embd, int32_t il_start, int32_t il_end) {
- const llama_model & model = lctx->model;
- llama_control_vector & cvec = lctx->cvec;
- if (data == nullptr) {
- // disable the current control vector (but leave allocated for later)
- cvec.layer_start = -1;
- cvec.layer_end = -1;
- return 0;
- }
- if (n_embd != (int) model.hparams.n_embd) {
- LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
- return 1;
- }
- if (cvec.tensors.empty()) {
- if (!llama_control_vector_init(cvec, model)) {
- return 1;
- }
- }
- cvec.layer_start = il_start;
- cvec.layer_end = il_end;
- for (size_t il = 1; il < model.hparams.n_layer; il++) {
- assert(cvec.tensors[il] != nullptr);
- const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
- if (off + n_embd <= len) {
- ggml_backend_tensor_set(cvec.tensors[il], data + off, 0, n_embd * ggml_element_size(cvec.tensors[il]));
- }
- }
- return 0;
- }
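- // KV cache view helpers, a minimal usage sketch (names are illustrative):
- //   struct llama_kv_cache_view view = llama_kv_cache_view_init(ctx, n_seq_max);
- //   llama_kv_cache_view_update(ctx, &view); // refresh cell, sequence and contiguity info
- //   ... inspect view.cells / view.cells_sequences ...
- //   llama_kv_cache_view_free(&view);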
- struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max) {
- struct llama_kv_cache_view result = {
- /*.n_cells = */ 0,
- /*.n_seq_max = */ n_seq_max,
- /*.token_count = */ 0,
- /*.used_cells = */ llama_get_kv_cache_used_cells(ctx),
- /*.max_contiguous = */ 0,
- /*.max_contiguous_idx = */ -1,
- /*.cells = */ nullptr,
- /*.cells_sequences = */ nullptr,
- };
- return result;
- }
- void llama_kv_cache_view_free(struct llama_kv_cache_view * view) {
- if (view->cells != nullptr) {
- free(view->cells);
- view->cells = nullptr;
- }
- if (view->cells_sequences != nullptr) {
- free(view->cells_sequences);
- view->cells_sequences = nullptr;
- }
- }
- void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view) {
- if (uint32_t(view->n_cells) < ctx->kv_self.size || view->cells == nullptr) {
- view->n_cells = int32_t(ctx->kv_self.size);
- void * p = realloc(view->cells, sizeof(struct llama_kv_cache_view_cell) * view->n_cells);
- GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells");
- view->cells = (struct llama_kv_cache_view_cell *)p;
- p = realloc(view->cells_sequences, sizeof(llama_seq_id) * view->n_seq_max * view->n_cells);
- GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells sequences");
- view->cells_sequences = (llama_seq_id *)p;
- }
- const std::vector<llama_kv_cell> & kv_cells = ctx->kv_self.cells;
- llama_kv_cache_view_cell * c_curr = view->cells;
- llama_seq_id * cs_curr = view->cells_sequences;
- int32_t used_cells = 0;
- int32_t token_count = 0;
- int32_t curr_contig_idx = -1;
- uint32_t max_contig = 0;
- int32_t max_contig_idx = -1;
- for (int32_t i = 0; i < int32_t(ctx->kv_self.size); i++, c_curr++, cs_curr += view->n_seq_max) {
- const size_t curr_size = kv_cells[i].seq_id.size();
- token_count += curr_size;
- c_curr->pos = kv_cells[i].pos + kv_cells[i].delta;
- if (curr_size > 0) {
- if (curr_contig_idx >= 0 && uint32_t(i - curr_contig_idx) > max_contig) {
- max_contig = i - curr_contig_idx;
- max_contig_idx = curr_contig_idx;
- }
- curr_contig_idx = -1;
- } else if (curr_contig_idx < 0) {
- curr_contig_idx = i;
- }
- int seq_idx = 0;
- for (const llama_seq_id it : kv_cells[i].seq_id) {
- if (seq_idx >= view->n_seq_max) {
- break;
- }
- cs_curr[seq_idx] = it;
- seq_idx++;
- }
- if (seq_idx != 0) {
- used_cells++;
- }
- for (; seq_idx < view->n_seq_max; seq_idx++) {
- cs_curr[seq_idx] = -1;
- }
- }
- if (curr_contig_idx >= 0 && kv_cells.size() - curr_contig_idx > max_contig) {
- max_contig_idx = curr_contig_idx;
- max_contig = kv_cells.size() - curr_contig_idx;
- }
- view->max_contiguous = max_contig;
- view->max_contiguous_idx = max_contig_idx;
- view->token_count = token_count;
- view->used_cells = used_cells;
- if (uint32_t(used_cells) != ctx->kv_self.used) {
- LLAMA_LOG_ERROR("%s: used cells mismatch. kv_cache says %d but we calculated %d\n",
- __func__, ctx->kv_self.used, used_cells);
- }
- }
- int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx) {
- int result = 0;
- for (uint32_t i = 0; i < ctx->kv_self.size; i++) {
- result += ctx->kv_self.cells[i].seq_id.size();
- }
- return result;
- }
- int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx) {
- return ctx->kv_self.used;
- }
- void llama_kv_cache_clear(struct llama_context * ctx) {
- llama_kv_cache_clear(ctx->kv_self);
- }
- bool llama_kv_cache_seq_rm(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
- return llama_kv_cache_seq_rm(ctx->kv_self, seq_id, p0, p1);
- }
- void llama_kv_cache_seq_cp(struct llama_context * ctx, llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
- if (seq_id_src == seq_id_dst) {
- return;
- }
- llama_kv_cache_seq_cp(ctx->kv_self, seq_id_src, seq_id_dst, p0, p1);
- }
- void llama_kv_cache_seq_keep(struct llama_context * ctx, llama_seq_id seq_id) {
- llama_kv_cache_seq_keep(ctx->kv_self, seq_id);
- }
- void llama_kv_cache_seq_add(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) {
- if (delta == 0) {
- return;
- }
- llama_kv_cache_seq_add(ctx->kv_self, seq_id, p0, p1, delta);
- }
- void llama_kv_cache_seq_div(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
- if (d == 1) {
- return;
- }
- llama_kv_cache_seq_div(ctx->kv_self, seq_id, p0, p1, d);
- }
- llama_pos llama_kv_cache_seq_pos_max(struct llama_context * ctx, llama_seq_id seq_id) {
- return llama_kv_cache_seq_pos_max(ctx->kv_self, seq_id);
- }
- void llama_kv_cache_defrag(struct llama_context * ctx) {
- llama_kv_cache_defrag(ctx->kv_self);
- }
- void llama_kv_cache_update(struct llama_context * ctx) {
- llama_kv_cache_update_internal(*ctx);
- }
- // deprecated
- size_t llama_get_state_size(struct llama_context * ctx) {
- return llama_state_get_size(ctx);
- }
- // deprecated
- size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
- return llama_state_get_data(ctx, dst, -1);
- }
- // deprecated
- size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src) {
- return llama_state_set_data(ctx, src, -1);
- }
- // deprecated
- bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
- return llama_state_load_file(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
- }
- // deprecated
- bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
- return llama_state_save_file(ctx, path_session, tokens, n_token_count);
- }
- // TODO: replace all non-fatal assertions with returned errors or exceptions
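- // abstract writer used to serialize the context state; the concrete backends below either
- // count bytes (dummy), copy into a caller-provided buffer, or stream to a file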
- struct llama_data_write {
- virtual void write(const void * src, size_t size) = 0;
- virtual void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) = 0;
- virtual size_t get_size_written() = 0;
- virtual ~llama_data_write() = default;
- void write_string(const std::string & str) {
- uint32_t str_size = str.size();
- write(&str_size, sizeof(str_size));
- write(str.data(), str_size);
- }
- void write_model_info(const struct llama_context * ctx) {
- std::string arch_str = LLM_ARCH_NAMES.at(ctx->model.arch);
- write_string(arch_str);
- // TODO: add more model-specific info which should prevent loading the session file if not identical
- }
- //void write_rng(const std::mt19937 & rng) {
- // std::ostringstream rng_ss;
- // rng_ss << rng;
- // const std::string & rng_str = rng_ss.str();
- // write_string(rng_str);
- //}
- void write_output_ids(struct llama_context * ctx) {
- llama_output_reorder(ctx);
- const uint32_t n_outputs = ctx->n_outputs;
- std::vector<int32_t> output_pos;
- const size_t n_batch = ctx->cparams.n_batch;
- const auto & output_ids = ctx->output_ids;
- GGML_ASSERT(n_outputs <= ctx->output_size);
- output_pos.resize(n_outputs);
- // build a more compact representation of the output ids
- for (size_t i = 0; i < n_batch; ++i) {
- // map an output id to a position in the batch
- int32_t pos = output_ids[i];
- if (pos >= 0) {
- GGML_ASSERT((uint32_t) pos < n_outputs);
- output_pos[pos] = i;
- }
- }
- write(&n_outputs, sizeof(n_outputs));
- if (n_outputs) {
- write(output_pos.data(), n_outputs * sizeof(int32_t));
- }
- }
- void write_logits(const struct llama_context * ctx) {
- const uint64_t logits_size = std::min((uint64_t) ctx->logits_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_vocab);
- write(&logits_size, sizeof(logits_size));
- if (logits_size) {
- write(ctx->logits, logits_size * sizeof(float));
- }
- }
- void write_embeddings(const struct llama_context * ctx) {
- const uint64_t embeddings_size = std::min((uint64_t) ctx->embd_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_embd);
- write(&embeddings_size, sizeof(embeddings_size));
- if (embeddings_size) {
- write(ctx->embd, embeddings_size * sizeof(float));
- }
- }
- void write_kv_cache_meta(const llama_kv_cache & kv_self, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) {
- for (const auto & range : cell_ranges) {
- for (uint32_t i = range.first; i < range.second; ++i) {
- const auto & cell = kv_self.cells[i];
- const llama_pos pos = cell.pos;
- const uint32_t n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0;
- write(&pos, sizeof(pos));
- write(&n_seq_id, sizeof(n_seq_id));
- if (n_seq_id) {
- for (auto seq_id : cell.seq_id) {
- write(&seq_id, sizeof(seq_id));
- }
- }
- }
- }
- }
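- // data layout written below: the v_trans flag and n_layer, then per layer the key type, key row size
- // and one contiguous block of key rows per cell range; values follow in the same row-wise form when
- // not transposed, otherwise as element size + n_embd_v_gqa followed by per-row slices of each range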
- void write_kv_cache_data(const struct llama_context * ctx, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) {
- const struct llama_kv_cache & kv_self = ctx->kv_self;
- const struct llama_hparams & hparams = ctx->model.hparams;
- const uint32_t v_trans = kv_self.v_trans ? 1 : 0;
- const uint32_t n_layer = hparams.n_layer;
- write(&v_trans, sizeof(v_trans));
- write(&n_layer, sizeof(n_layer));
- // Iterate and write all the keys first, each row is a cell
- // Get whole range at a time
- for (uint32_t il = 0; il < n_layer; ++il) {
- const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
- // Write key type
- const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type;
- write(&k_type_i, sizeof(k_type_i));
- // Write row size of key
- const uint64_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
- write(&k_size_row, sizeof(k_size_row));
- // Write out each contiguous range of cells, one block of k_size_row bytes per cell
- for (const auto & range : cell_ranges) {
- const size_t range_size = range.second - range.first;
- const size_t buf_size = range_size * k_size_row;
- write_tensor_data(kv_self.k_l[il], range.first * k_size_row, buf_size);
- }
- }
- if (!kv_self.v_trans) {
- for (uint32_t il = 0; il < n_layer; ++il) {
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
- // Write value type
- const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
- write(&v_type_i, sizeof(v_type_i));
- // Write row size of value
- const uint64_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa);
- write(&v_size_row, sizeof(v_size_row));
- // Write out each contiguous range of cells, one block of v_size_row bytes per cell
- for (const auto & range : cell_ranges) {
- const size_t range_size = range.second - range.first;
- const size_t buf_size = range_size * v_size_row;
- write_tensor_data(kv_self.v_l[il], range.first * v_size_row, buf_size);
- }
- }
- } else {
- // When v is transposed, we also need the element size and get the element ranges from each row
- const uint32_t kv_size = kv_self.size;
- for (uint32_t il = 0; il < n_layer; ++il) {
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
- // Write value type
- const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
- write(&v_type_i, sizeof(v_type_i));
- // Write element size
- const uint32_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
- write(&v_size_el, sizeof(v_size_el));
- // Write GQA embedding size
- write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));
- // For each row, we get the element values of each cell
- for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
- // Write out each contiguous range of cells, v_size_el bytes per cell for this row
- for (const auto & range : cell_ranges) {
- const size_t range_size = range.second - range.first;
- const size_t src_offset = (range.first + j * kv_size) * v_size_el;
- const size_t buf_size = range_size * v_size_el;
- write_tensor_data(kv_self.v_l[il], src_offset, buf_size);
- }
- }
- }
- }
- }
- void write_kv_cache(const struct llama_context * ctx, llama_seq_id seq_id = -1) {
- const struct llama_kv_cache & kv_self = ctx->kv_self;
- std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
- uint32_t cell_count = 0;
- // Count the number of cells with the specified seq_id
- // Find all the ranges of cells with this seq id (or all, when -1)
- uint32_t cell_range_begin = kv_self.size;
- for (uint32_t i = 0; i < kv_self.size; ++i) {
- const auto & cell = kv_self.cells[i];
- if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) {
- ++cell_count;
- if (cell_range_begin == kv_self.size) {
- cell_range_begin = i;
- }
- } else {
- if (cell_range_begin != kv_self.size) {
- cell_ranges.emplace_back(cell_range_begin, i);
- cell_range_begin = kv_self.size;
- }
- }
- }
- if (cell_range_begin != kv_self.size) {
- cell_ranges.emplace_back(cell_range_begin, kv_self.size);
- }
- // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
- uint32_t cell_count_check = 0;
- for (const auto & range : cell_ranges) {
- cell_count_check += range.second - range.first;
- }
- GGML_ASSERT(cell_count == cell_count_check);
- write(&cell_count, sizeof(cell_count));
- write_kv_cache_meta(kv_self, cell_ranges, seq_id);
- write_kv_cache_data(ctx, cell_ranges);
- }
- };
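- // abstract reader mirroring llama_data_write: read(size) exposes the next size bytes,
- // read_to() copies them into a caller-provided destination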
- struct llama_data_read {
- virtual const uint8_t * read(size_t size) = 0;
- virtual void read_to(void * dst, size_t size) = 0;
- virtual size_t get_size_read() = 0;
- virtual ~llama_data_read() = default;
- void read_string(std::string & str) {
- uint32_t str_size;
- read_to(&str_size, sizeof(str_size));
- str.assign((const char *) read(str_size), str_size);
- }
- // validate model information
- void read_model_info(const struct llama_context * ctx) {
- std::string cur_arch_str = LLM_ARCH_NAMES.at(ctx->model.arch);
- std::string arch_str;
- read_string(arch_str);
- if (cur_arch_str != arch_str) {
- throw std::runtime_error(format("wrong model arch: '%s' instead of '%s'", arch_str.c_str(), cur_arch_str.c_str()));
- }
- // TODO: add more info which needs to be identical but which is not verified otherwise
- }
- //void read_rng(std::mt19937 & rng) {
- // std::string rng_str;
- // read_string(rng_str);
- // std::istringstream rng_ss(rng_str);
- // rng_ss >> rng;
- // if (rng_ss.fail()) {
- // throw std::runtime_error("failed to load RNG state");
- // }
- //}
- void read_output_ids(struct llama_context * ctx) {
- std::vector<int32_t> output_pos;
- uint32_t n_outputs;
- read_to(&n_outputs, sizeof(n_outputs));
- if (n_outputs > llama_output_reserve(*ctx, n_outputs)) {
- throw std::runtime_error("could not reserve outputs");
- }
- if (n_outputs) {
- output_pos.resize(n_outputs);
- read_to(output_pos.data(), n_outputs * sizeof(int32_t));
- for (int32_t i = 0; i < (int32_t) output_pos.size(); ++i) {
- int32_t id = output_pos[i];
- if ((uint32_t) id >= ctx->cparams.n_batch) {
- throw std::runtime_error(format("invalid output id, %d does not fit in batch size of %u", id, ctx->cparams.n_batch));
- }
- ctx->output_ids[id] = i;
- }
- ctx->n_outputs = n_outputs;
- }
- }
- void read_logits(struct llama_context * ctx) {
- uint64_t logits_size;
- read_to(&logits_size, sizeof(logits_size));
- if (ctx->logits_size < logits_size) {
- throw std::runtime_error("logits buffer too small");
- }
- if (logits_size) {
- read_to(ctx->logits, logits_size * sizeof(float));
- }
- }
- void read_embeddings(struct llama_context * ctx) {
- uint64_t embeddings_size;
- read_to(&embeddings_size, sizeof(embeddings_size));
- if (ctx->embd_size < embeddings_size) {
- throw std::runtime_error("embeddings buffer too small");
- }
- if (embeddings_size) {
- read_to(ctx->embd, embeddings_size * sizeof(float));
- }
- }
- bool read_kv_cache_meta(struct llama_context * ctx, uint32_t cell_count, llama_seq_id dest_seq_id = -1) {
- struct llama_kv_cache & kv_self = ctx->kv_self;
- if (dest_seq_id != -1) {
- // single sequence
- llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
- llama_ubatch batch = ctx->sbatch.reserve_ubatch(cell_count, /* has_embd */ false);
- batch.n_tokens = cell_count;
- batch.n_seq_tokens = cell_count;
- batch.n_seqs = 1;
- for (uint32_t i = 0; i < cell_count; ++i) {
- llama_pos pos;
- uint32_t n_seq_id;
- read_to(&pos, sizeof(pos));
- read_to(&n_seq_id, sizeof(n_seq_id));
- if (n_seq_id != 0) {
- LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
- return false;
- }
- batch.pos[i] = pos;
- }
- batch.n_seq_id[0] = 1;
- batch.seq_id[0] = &dest_seq_id;
- if (!llama_kv_cache_find_slot(kv_self, batch)) {
- LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
- return false;
- }
- // DEBUG CHECK: kv_self.head should be our first cell, kv_self.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
- // Assume that this is one contiguous block of cells
- GGML_ASSERT(kv_self.head + cell_count <= kv_self.size);
- GGML_ASSERT(kv_self.cells[kv_self.head].pos == batch.pos[0]);
- GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].pos == batch.pos[cell_count - 1]);
- GGML_ASSERT(kv_self.cells[kv_self.head].has_seq_id(dest_seq_id));
- GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].has_seq_id(dest_seq_id));
- } else {
- // whole KV cache restore
- if (cell_count > kv_self.size) {
- LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
- return false;
- }
- llama_kv_cache_clear(kv_self);
- for (uint32_t i = 0; i < cell_count; ++i) {
- llama_kv_cell & cell = kv_self.cells[i];
- llama_pos pos;
- uint32_t n_seq_id;
- read_to(&pos, sizeof(pos));
- read_to(&n_seq_id, sizeof(n_seq_id));
- cell.pos = pos;
- for (uint32_t j = 0; j < n_seq_id; ++j) {
- llama_seq_id seq_id;
- read_to(&seq_id, sizeof(seq_id));
- if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) {
- LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx));
- return false;
- }
- cell.seq_id.insert(seq_id);
- if (kv_self.recurrent) {
- int32_t & tail = kv_self.cells[seq_id].tail;
- if (tail != -1) {
- LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail);
- return false;
- }
- tail = i;
- }
- }
- }
- kv_self.head = 0;
- kv_self.used = cell_count;
- }
- if (kv_self.recurrent) {
- for (uint32_t i = 0; i < cell_count; ++i) {
- uint32_t cell_id = kv_self.head + i;
- // make sure the recurrent states will keep their restored state
- kv_self.cells[cell_id].src = cell_id;
- }
- }
- return true;
- }
- bool read_kv_cache_data(struct llama_context * ctx, uint32_t cell_count) {
- const struct llama_hparams & hparams = ctx->model.hparams;
- struct llama_kv_cache & kv_self = ctx->kv_self;
- uint32_t v_trans;
- uint32_t n_layer;
- read_to(&v_trans, sizeof(v_trans));
- read_to(&n_layer, sizeof(n_layer));
- if (n_layer != hparams.n_layer) {
- LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer);
- return false;
- }
- if (cell_count > kv_self.size) {
- LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, kv_self.size);
- return false;
- }
- if (kv_self.v_trans != (bool) v_trans) {
- LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__);
- return false;
- }
- // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
- for (uint32_t il = 0; il < n_layer; ++il) {
- const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
- // Read type of key
- int32_t k_type_i_ref;
- read_to(&k_type_i_ref, sizeof(k_type_i_ref));
- const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type;
- if (k_type_i != k_type_i_ref) {
- LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il);
- return false;
- }
- // Read row size of key
- uint64_t k_size_row_ref;
- read_to(&k_size_row_ref, sizeof(k_size_row_ref));
- const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
- if (k_size_row != k_size_row_ref) {
- LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il);
- return false;
- }
- if (cell_count) {
- // Read and set the keys for the whole cell range
- ggml_backend_tensor_set(kv_self.k_l[il], read(cell_count * k_size_row), kv_self.head * k_size_row, cell_count * k_size_row);
- }
- }
- if (!kv_self.v_trans) {
- for (uint32_t il = 0; il < n_layer; ++il) {
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
- // Read type of value
- int32_t v_type_i_ref;
- read_to(&v_type_i_ref, sizeof(v_type_i_ref));
- const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
- if (v_type_i != v_type_i_ref) {
- LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
- return false;
- }
- // Read row size of value
- uint64_t v_size_row_ref;
- read_to(&v_size_row_ref, sizeof(v_size_row_ref));
- const size_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa);
- if (v_size_row != v_size_row_ref) {
- LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il);
- return false;
- }
- if (cell_count) {
- // Read and set the values for the whole cell range
- ggml_backend_tensor_set(kv_self.v_l[il], read(cell_count * v_size_row), kv_self.head * v_size_row, cell_count * v_size_row);
- }
- }
- } else {
- // For each layer, read the values for each cell (transposed)
- for (uint32_t il = 0; il < n_layer; ++il) {
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
- // Read type of value
- int32_t v_type_i_ref;
- read_to(&v_type_i_ref, sizeof(v_type_i_ref));
- const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
- if (v_type_i != v_type_i_ref) {
- LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
- return false;
- }
- // Read element size of value
- uint32_t v_size_el_ref;
- read_to(&v_size_el_ref, sizeof(v_size_el_ref));
- const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
- if (v_size_el != v_size_el_ref) {
- LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il);
- return false;
- }
- // Read GQA embedding size
- uint32_t n_embd_v_gqa_ref;
- read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref));
- if (n_embd_v_gqa != n_embd_v_gqa_ref) {
- LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il);
- return false;
- }
- if (cell_count) {
- // For each row in the transposed matrix, read the values for the whole cell range
- for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
- const size_t dst_offset = (kv_self.head + j * kv_self.size) * v_size_el;
- ggml_backend_tensor_set(kv_self.v_l[il], read(cell_count * v_size_el), dst_offset, cell_count * v_size_el);
- }
- }
- }
- }
- return true;
- }
- void read_kv_cache(struct llama_context * ctx, llama_seq_id seq_id = -1) {
- uint32_t cell_count;
- read_to(&cell_count, sizeof(cell_count));
- bool res = read_kv_cache_meta(ctx, cell_count, seq_id) && read_kv_cache_data(ctx, cell_count);
- if (!res) {
- if (seq_id == -1) {
- llama_kv_cache_clear(ctx);
- } else {
- llama_kv_cache_seq_rm(ctx, seq_id, -1, -1);
- }
- throw std::runtime_error("failed to restore kv cache");
- }
- }
- };
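- // writer that only counts bytes; used to compute the state size without copying any data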
- struct llama_data_write_dummy : llama_data_write {
- size_t size_written = 0;
- llama_data_write_dummy() {}
- void write(const void * /* src */, size_t size) override {
- size_written += size;
- }
- void write_tensor_data(const struct ggml_tensor * /* tensor */, size_t /* offset */, size_t size) override {
- size_written += size;
- }
- size_t get_size_written() override {
- return size_written;
- }
- };
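- // writer that copies the state into a fixed-size caller buffer; throws when the buffer is exhausted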
- struct llama_data_write_buffer : llama_data_write {
- uint8_t * ptr;
- size_t buf_size = 0;
- size_t size_written = 0;
- llama_data_write_buffer(uint8_t * p, size_t len) : ptr(p), buf_size(len) {}
- void write(const void * src, size_t size) override {
- if (size > buf_size) {
- throw std::runtime_error("unexpectedly reached end of buffer");
- }
- memcpy(ptr, src, size);
- ptr += size;
- size_written += size;
- buf_size -= size;
- }
- void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override {
- if (size > buf_size) {
- throw std::runtime_error("unexpectedly reached end of buffer");
- }
- ggml_backend_tensor_get(tensor, ptr, offset, size);
- ptr += size;
- size_written += size;
- buf_size -= size;
- }
- size_t get_size_written() override {
- return size_written;
- }
- };
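- // reader over a caller-provided buffer; throws when more data is requested than remains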
- struct llama_data_read_buffer : llama_data_read {
- const uint8_t * ptr;
- size_t buf_size = 0;
- size_t size_read = 0;
- llama_data_read_buffer(const uint8_t * p, size_t len) : ptr(p), buf_size(len) {}
- const uint8_t * read(size_t size) override {
- const uint8_t * base_ptr = ptr;
- if (size > buf_size) {
- throw std::runtime_error("unexpectedly reached end of buffer");
- }
- ptr += size;
- size_read += size;
- buf_size -= size;
- return base_ptr;
- }
- void read_to(void * dst, size_t size) override {
- memcpy(dst, read(size), size);
- }
- size_t get_size_read() override {
- return size_read;
- }
- };
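- // writer that streams the state to a llama_file, staging tensor data through a temporary buffer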
- struct llama_data_write_file : llama_data_write {
- llama_file * file;
- size_t size_written = 0;
- std::vector<uint8_t> temp_buffer;
- llama_data_write_file(llama_file * f) : file(f) {}
- void write(const void * src, size_t size) override {
- file->write_raw(src, size);
- size_written += size;
- }
- void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override {
- temp_buffer.resize(size);
- ggml_backend_tensor_get(tensor, temp_buffer.data(), offset, size);
- write(temp_buffer.data(), temp_buffer.size());
- }
- size_t get_size_written() override {
- return size_written;
- }
- };
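- // reader that streams from a llama_file; read() stages the requested bytes in a temporary buffer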
- struct llama_data_read_file : llama_data_read {
- llama_file * file;
- size_t size_read = 0;
- std::vector<uint8_t> temp_buffer;
- llama_data_read_file(llama_file * f) : file(f) {}
- void read_to(void * dst, size_t size) override {
- file->read_raw(dst, size);
- size_read += size;
- }
- const uint8_t * read(size_t size) override {
- temp_buffer.resize(size);
- read_to(temp_buffer.data(), size);
- return temp_buffer.data();
- }
- size_t get_size_read() override {
- return size_read;
- }
- };
- /** copy state data into either a buffer or file depending on the passed in context
- *
- * file context:
- * llama_file file("/path", "wb");
- * llama_data_write_file data_ctx(&file);
- * llama_state_get_data_internal(ctx, data_ctx);
- *
- * buffer context:
- * std::vector<uint8_t> buf(max_size, 0);
- * llama_data_write_buffer data_ctx(buf.data(), max_size);
- * llama_state_get_data_internal(ctx, data_ctx);
- *
- */
- static size_t llama_state_get_data_internal(struct llama_context * ctx, llama_data_write & data_ctx) {
- llama_synchronize(ctx);
- data_ctx.write_model_info(ctx);
- // copy outputs
- data_ctx.write_output_ids(ctx);
- data_ctx.write_logits(ctx);
- data_ctx.write_embeddings(ctx);
- data_ctx.write_kv_cache(ctx);
- return data_ctx.get_size_written();
- }
- size_t llama_state_get_data(struct llama_context * ctx, uint8_t * dst, size_t size) {
- llama_data_write_buffer data_ctx(dst, size);
- try {
- return llama_state_get_data_internal(ctx, data_ctx);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what());
- return 0;
- }
- }
- // Returns the *actual* size of the state.
- // Intended to be used when saving the state to a buffer.
- size_t llama_state_get_size(struct llama_context * ctx) {
- llama_data_write_dummy data_ctx;
- try {
- return llama_state_get_data_internal(ctx, data_ctx);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what());
- return 0;
- }
- }
- static size_t llama_state_set_data_internal(struct llama_context * ctx, llama_data_read & data_ctx) {
- llama_synchronize(ctx);
- data_ctx.read_model_info(ctx);
- // set outputs
- data_ctx.read_output_ids(ctx);
- data_ctx.read_logits(ctx);
- data_ctx.read_embeddings(ctx);
- data_ctx.read_kv_cache(ctx);
- return data_ctx.get_size_read();
- }
- // Sets the state reading from the specified source address
- size_t llama_state_set_data(struct llama_context * ctx, const uint8_t * src, size_t size) {
- llama_data_read_buffer data_ctx(src, size);
- try {
- return llama_state_set_data_internal(ctx, data_ctx);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what());
- return 0;
- }
- }
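- // minimal usage sketch for buffer-based snapshots (assumes a valid ctx):
- //   std::vector<uint8_t> buf(llama_state_get_size(ctx));
- //   llama_state_get_data(ctx, buf.data(), buf.size());
- //   ... later, on a context created with the same model and parameters ...
- //   llama_state_set_data(ctx, buf.data(), buf.size());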
- static bool llama_state_load_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
- llama_file file(path_session, "rb");
- // sanity checks
- {
- const uint32_t magic = file.read_u32();
- const uint32_t version = file.read_u32();
- if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
- LLAMA_LOG_ERROR("%s: unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
- return false;
- }
- }
- // load the prompt
- {
- const uint32_t n_token_count = file.read_u32();
- if (n_token_count > n_token_capacity) {
- LLAMA_LOG_ERROR("%s: token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
- return false;
- }
- file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
- *n_token_count_out = n_token_count;
- }
- // restore the context state
- {
- const size_t n_state_size_cur = file.size - file.tell();
- llama_data_read_file data_ctx(&file);
- const size_t n_read = llama_state_set_data_internal(ctx, data_ctx);
- if (n_read != n_state_size_cur) {
- LLAMA_LOG_ERROR("%s: did not read all of the session file data! size %zu, got %zu\n", __func__, n_state_size_cur, n_read);
- return false;
- }
- }
- return true;
- }
- bool llama_state_load_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
- try {
- return llama_state_load_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error loading session file: %s\n", __func__, err.what());
- return false;
- }
- }
- static bool llama_state_save_file_internal(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
- llama_file file(path_session, "wb");
- file.write_u32(LLAMA_SESSION_MAGIC);
- file.write_u32(LLAMA_SESSION_VERSION);
- // save the prompt
- file.write_u32((uint32_t) n_token_count);
- file.write_raw(tokens, sizeof(llama_token) * n_token_count);
- // save the context state using stream saving
- llama_data_write_file data_ctx(&file);
- llama_state_get_data_internal(ctx, data_ctx);
- return true;
- }
- bool llama_state_save_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
- try {
- return llama_state_save_file_internal(ctx, path_session, tokens, n_token_count);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error saving session file: %s\n", __func__, err.what());
- return false;
- }
- }
- static size_t llama_state_seq_get_data_internal(struct llama_context * ctx, llama_data_write & data_ctx, llama_seq_id seq_id) {
- llama_synchronize(ctx);
- data_ctx.write_kv_cache(ctx, seq_id);
- return data_ctx.get_size_written();
- }
- size_t llama_state_seq_get_size(struct llama_context * ctx, llama_seq_id seq_id) {
- llama_data_write_dummy data_ctx;
- return llama_state_seq_get_data_internal(ctx, data_ctx, seq_id);
- }
- size_t llama_state_seq_get_data(struct llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id) {
- llama_data_write_buffer data_ctx(dst, size);
- try {
- return llama_state_seq_get_data_internal(ctx, data_ctx, seq_id);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error saving sequence state: %s\n", __func__, err.what());
- return 0;
- }
- }
- static size_t llama_state_seq_set_data_internal(struct llama_context * ctx, llama_data_read & data_ctx, llama_seq_id dest_seq_id) {
- llama_synchronize(ctx);
- data_ctx.read_kv_cache(ctx, dest_seq_id);
- return data_ctx.get_size_read();
- }
- size_t llama_state_seq_set_data(struct llama_context * ctx, const uint8_t * src, size_t size, llama_seq_id dest_seq_id) {
- llama_data_read_buffer data_ctx(src, size);
- try {
- return llama_state_seq_set_data_internal(ctx, data_ctx, dest_seq_id);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error loading sequence state: %s\n", __func__, err.what());
- return 0;
- }
- }
- static size_t llama_state_seq_save_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) {
- llama_file file(filepath, "wb");
- file.write_u32(LLAMA_STATE_SEQ_MAGIC);
- file.write_u32(LLAMA_STATE_SEQ_VERSION);
- // save the prompt
- file.write_u32((uint32_t) n_token_count);
- file.write_raw(tokens, sizeof(llama_token) * n_token_count);
- // save the context state using stream saving
- llama_data_write_file data_ctx(&file);
- llama_state_seq_get_data_internal(ctx, data_ctx, seq_id);
- const size_t res = file.tell();
- GGML_ASSERT(res == sizeof(uint32_t) * 3 + sizeof(llama_token) * n_token_count + data_ctx.get_size_written());
- return res;
- }
- static size_t llama_state_seq_load_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
- llama_file file(filepath, "rb");
- // version checks
- {
- const uint32_t magic = file.read_u32();
- const uint32_t version = file.read_u32();
- if (magic != LLAMA_STATE_SEQ_MAGIC || version != LLAMA_STATE_SEQ_VERSION) {
- LLAMA_LOG_ERROR("%s: unknown (magic, version) for sequence state file: %08x, %08x\n", __func__, magic, version);
- return 0;
- }
- }
- // load the prompt
- {
- const uint32_t n_token_count = file.read_u32();
- if (n_token_count > n_token_capacity) {
- LLAMA_LOG_ERROR("%s: token count in sequence state file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
- return 0;
- }
- file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
- *n_token_count_out = n_token_count;
- }
- // restore the context state
- {
- const size_t state_size = file.size - file.tell();
- llama_data_read_file data_ctx(&file);
- const size_t nread = llama_state_seq_set_data_internal(ctx, data_ctx, dest_seq_id);
- if (!nread) {
- LLAMA_LOG_ERROR("%s: failed to restore sequence state\n", __func__);
- return 0;
- }
- GGML_ASSERT(nread <= state_size);
- GGML_ASSERT(nread + sizeof(uint32_t) * 3 + sizeof(llama_token) * *n_token_count_out == file.tell());
- }
- return file.tell();
- }
- size_t llama_state_seq_save_file(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) {
- try {
- return llama_state_seq_save_file_internal(ctx, filepath, seq_id, tokens, n_token_count);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error saving sequence state file: %s\n", __func__, err.what());
- return 0;
- }
- }
- size_t llama_state_seq_load_file(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
- try {
- return llama_state_seq_load_file_internal(ctx, filepath, dest_seq_id, tokens_out, n_token_capacity, n_token_count_out);
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: error loading sequence state file: %s\n", __func__, err.what());
- return 0;
- }
- }
- void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch) {
- ctx->cparams.n_threads = n_threads;
- ctx->cparams.n_threads_batch = n_threads_batch;
- }
- int32_t llama_n_threads(struct llama_context * ctx) {
- return ctx->cparams.n_threads;
- }
- int32_t llama_n_threads_batch(struct llama_context * ctx) {
- return ctx->cparams.n_threads_batch;
- }
- void llama_set_abort_callback(struct llama_context * ctx, bool (*abort_callback)(void * data), void * abort_callback_data) {
- ctx->abort_callback = abort_callback;
- ctx->abort_callback_data = abort_callback_data;
- }
- void llama_set_embeddings(struct llama_context * ctx, bool embeddings) {
- ctx->cparams.embeddings = embeddings;
- }
- void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn) {
- ctx->cparams.causal_attn = causal_attn;
- }
- void llama_set_cross_attention(struct llama_context * ctx, bool cross_attention) {
- ctx->cparams.cross_attn = cross_attention;
- }
- struct llama_batch llama_batch_get_one(
- llama_token * tokens,
- int32_t n_tokens,
- llama_pos pos_0,
- llama_seq_id seq_id) {
- return {
- /*n_tokens =*/ n_tokens,
- /*tokens =*/ tokens,
- /*embd =*/ nullptr,
- /*n_embd =*/ 0,
- /*pos =*/ nullptr,
- /*n_seq_id =*/ nullptr,
- /*seq_id =*/ nullptr,
- /*logits =*/ nullptr,
- /*all_pos_0 =*/ pos_0,
- /*all_pos_1 =*/ 1,
- /*all_seq_id =*/ seq_id,
- };
- }
- struct llama_batch llama_batch_init(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) {
- llama_batch batch = {
- /*n_tokens =*/ 0,
- /*tokens =*/ nullptr,
- /*embd =*/ nullptr,
- /*n_embd =*/ 0,
- /*pos =*/ nullptr,
- /*n_seq_id =*/ nullptr,
- /*seq_id =*/ nullptr,
- /*logits =*/ nullptr,
- /*all_pos_0 =*/ 0,
- /*all_pos_1 =*/ 0,
- /*all_seq_id =*/ 0,
- };
- if (embd) {
- batch.embd = (float *) malloc(sizeof(float) * n_tokens_alloc * embd);
- batch.n_embd = embd;
- } else {
- batch.token = (llama_token *) malloc(sizeof(llama_token) * n_tokens_alloc);
- }
- batch.pos = (llama_pos *) malloc(sizeof(llama_pos) * n_tokens_alloc);
- batch.n_seq_id = (int32_t *) malloc(sizeof(int32_t) * n_tokens_alloc);
- batch.seq_id = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * (n_tokens_alloc + 1));
- for (int i = 0; i < n_tokens_alloc; ++i) {
- batch.seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max);
- }
- batch.seq_id[n_tokens_alloc] = nullptr;
- batch.logits = (int8_t *) malloc(sizeof(int8_t) * n_tokens_alloc);
- return batch;
- }
- void llama_batch_free(struct llama_batch batch) {
- if (batch.token) free(batch.token);
- if (batch.embd) free(batch.embd);
- if (batch.pos) free(batch.pos);
- if (batch.n_seq_id) free(batch.n_seq_id);
- if (batch.seq_id) {
- for (int i = 0; batch.seq_id[i] != nullptr; ++i) {
- free(batch.seq_id[i]);
- }
- free(batch.seq_id);
- }
- if (batch.logits) free(batch.logits);
- }
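- // minimal usage sketch (token batch, single sequence; field values are illustrative):
- //   llama_batch batch = llama_batch_init(n_tokens_alloc, /*embd*/ 0, /*n_seq_max*/ 1);
- //   for each token i: set batch.token[i], batch.pos[i], batch.n_seq_id[i] = 1,
- //                     batch.seq_id[i][0] = 0 and batch.logits[i]; then set batch.n_tokens
- //   llama_decode(ctx, batch);
- //   llama_batch_free(batch);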
- int32_t llama_encode(
- struct llama_context * ctx,
- struct llama_batch batch) {
- const int ret = llama_encode_internal(*ctx, batch);
- if (ret < 0) {
- LLAMA_LOG_ERROR("%s: failed to encode, ret = %d\n", __func__, ret);
- }
- return ret;
- }
- int32_t llama_decode(
- struct llama_context * ctx,
- struct llama_batch batch) {
- const int ret = llama_decode_internal(*ctx, batch);
- if (ret < 0) {
- LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
- }
- return ret;
- }
- void llama_synchronize(struct llama_context * ctx) {
- ggml_backend_sched_synchronize(ctx->sched);
- // FIXME: if multiple single tokens are evaluated without a synchronization,
- // the stats will be added to the prompt evaluation stats
- // this should only happen when using batch size 1 to evaluate a batch
- // add the evaluation to the stats
- if (ctx->n_queued_tokens == 1) {
- if (!ctx->cparams.no_perf) {
- ctx->t_eval_us += ggml_time_us() - ctx->t_compute_start_us;
- }
- ctx->n_eval++;
- } else if (ctx->n_queued_tokens > 1) {
- if (!ctx->cparams.no_perf) {
- ctx->t_p_eval_us += ggml_time_us() - ctx->t_compute_start_us;
- }
- ctx->n_p_eval += ctx->n_queued_tokens;
- }
- // get a more accurate load time, upon first eval
- if (ctx->n_queued_tokens > 0 && !ctx->has_evaluated_once) {
- ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
- ctx->has_evaluated_once = true;
- }
- ctx->n_queued_tokens = 0;
- ctx->t_compute_start_us = 0;
- }
- float * llama_get_logits(struct llama_context * ctx) {
- llama_synchronize(ctx);
- // reorder logits for backward compatibility
- // TODO: maybe deprecate this
- llama_output_reorder(ctx);
- return ctx->logits;
- }
- float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
- int32_t j = -1;
- llama_synchronize(ctx);
- try {
- if (ctx->logits == nullptr) {
- throw std::runtime_error("no logits");
- }
- if (i < 0) {
- j = ctx->n_outputs + i;
- if (j < 0) {
- throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs));
- }
- } else if ((size_t) i >= ctx->output_ids.size()) {
- throw std::runtime_error(format("out of range [0, %lu)", ctx->output_ids.size()));
- } else {
- j = ctx->output_ids[i];
- }
- if (j < 0) {
- throw std::runtime_error(format("batch.logits[%d] != true", i));
- }
- if (j >= ctx->n_outputs) {
- // This should not happen
- throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs));
- }
- return ctx->logits + j*ctx->model.hparams.n_vocab;
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: invalid logits id %d, reason: %s\n", __func__, i, err.what());
- #ifndef NDEBUG
- GGML_ABORT("fatal error");
- #else
- return nullptr;
- #endif
- }
- }
- float * llama_get_embeddings(struct llama_context * ctx) {
- llama_synchronize(ctx);
- // reorder embeddings for backward compatibility
- // TODO: maybe deprecate this
- llama_output_reorder(ctx);
- return ctx->embd;
- }
- float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) {
- int32_t j = -1;
- llama_synchronize(ctx);
- try {
- if (ctx->embd == nullptr) {
- throw std::runtime_error("no embeddings");
- }
- if (i < 0) {
- j = ctx->n_outputs + i;
- if (j < 0) {
- throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs));
- }
- } else if ((size_t) i >= ctx->output_ids.size()) {
- throw std::runtime_error(format("out of range [0, %lu)", ctx->output_ids.size()));
- } else {
- j = ctx->output_ids[i];
- }
- if (j < 0) {
- throw std::runtime_error(format("batch.logits[%d] != true", i));
- }
- if (j >= ctx->n_outputs) {
- // This should not happen
- throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs));
- }
- return ctx->embd + j*ctx->model.hparams.n_embd;
- } catch (const std::exception & err) {
- LLAMA_LOG_ERROR("%s: invalid embeddings id %d, reason: %s\n", __func__, i, err.what());
- #ifndef NDEBUG
- GGML_ABORT("fatal error");
- #else
- return nullptr;
- #endif
- }
- }
- float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id) {
- llama_synchronize(ctx);
- auto it = ctx->embd_seq.find(seq_id);
- if (it == ctx->embd_seq.end()) {
- return nullptr;
- }
- return it->second.data();
- }
- //
- // vocab
- //
- const char * llama_token_get_text(const struct llama_model * model, llama_token token) {
- return llama_token_get_text_impl(model->vocab, token);
- }
- float llama_token_get_score(const struct llama_model * model, llama_token token) {
- return llama_token_get_score_impl(model->vocab, token);
- }
- enum llama_token_attr llama_token_get_attr(const struct llama_model * model, llama_token token) {
- return llama_token_get_attr_impl(model->vocab, token);
- }
- bool llama_token_is_eog(const struct llama_model * model, llama_token token) {
- return llama_token_is_eog_impl(model->vocab, token);
- }
- bool llama_token_is_control(const struct llama_model * model, llama_token token) {
- return llama_token_is_control_impl(model->vocab, token);
- }
- llama_token llama_token_bos(const struct llama_model * model) {
- return llama_token_bos_impl(model->vocab);
- }
- llama_token llama_token_eos(const struct llama_model * model) {
- return llama_token_eos_impl(model->vocab);
- }
- llama_token llama_token_cls(const struct llama_model * model) {
- return llama_token_cls_impl(model->vocab);
- }
- llama_token llama_token_sep(const struct llama_model * model) {
- return llama_token_sep_impl(model->vocab);
- }
- llama_token llama_token_nl (const struct llama_model * model) {
- return llama_token_nl_impl(model->vocab);
- }
- llama_token llama_token_pad(const struct llama_model * model) {
- return llama_token_pad_impl(model->vocab);
- }
- bool llama_add_bos_token(const struct llama_model * model) {
- return llama_add_bos_token_impl(model->vocab);
- }
- bool llama_add_eos_token(const struct llama_model * model) {
- return llama_add_eos_token_impl(model->vocab);
- }
- llama_token llama_token_prefix(const struct llama_model * model) {
- return llama_token_prefix_impl(model->vocab);
- }
- llama_token llama_token_middle(const struct llama_model * model) {
- return llama_token_middle_impl(model->vocab);
- }
- llama_token llama_token_suffix(const struct llama_model * model) {
- return llama_token_suffix_impl(model->vocab);
- }
- llama_token llama_token_eot(const struct llama_model * model) {
- return llama_token_eot_impl(model->vocab);
- }
- //
- // tokenization
- //
- int32_t llama_tokenize(
- const struct llama_model * model,
- const char * text,
- int32_t text_len,
- llama_token * tokens,
- int32_t n_tokens_max,
- bool add_special,
- bool parse_special) {
- return llama_tokenize_impl(model->vocab, text, text_len, tokens, n_tokens_max, add_special, parse_special);
- }
- int32_t llama_token_to_piece(
- const struct llama_model * model,
- llama_token token,
- char * buf,
- int32_t length,
- int32_t lstrip,
- bool special) {
- return llama_token_to_piece_impl(model->vocab, token, buf, length, lstrip, special);
- }
- int32_t llama_detokenize(
- const struct llama_model * model,
- const llama_token * tokens,
- int32_t n_tokens,
- char * text,
- int32_t text_len_max,
- bool remove_special,
- bool unparse_special) {
- return llama_detokenize_impl(model->vocab, tokens, n_tokens, text, text_len_max, remove_special, unparse_special);
- }
- //
- // chat templates
- //
- // Simple version of "llama_apply_chat_template" that only works with strings
- // This function uses heuristic checks to determine the commonly used template format. It is not a jinja parser.
- static int32_t llama_chat_apply_template_internal(
- const std::string & tmpl,
- const std::vector<const llama_chat_message *> & chat,
- std::string & dest, bool add_ass) {
- // Taken from the research: https://github.com/ggerganov/llama.cpp/issues/5527
- std::stringstream ss;
- auto tmpl_contains = [&tmpl](std::string haystack) -> bool {
- return tmpl.find(haystack) != std::string::npos;
- };
- if (tmpl == "chatml" || tmpl_contains("<|im_start|>")) {
- // chatml template
- for (auto message : chat) {
- ss << "<|im_start|>" << message->role << "\n" << message->content << "<|im_end|>\n";
- }
- if (add_ass) {
- ss << "<|im_start|>assistant\n";
- }
- } else if (tmpl == "llama2" || tmpl == "mistral" || tmpl_contains("[INST]")) {
- // llama2 template and its variants
- // [variant] support system message
- bool support_system_message = tmpl_contains("<<SYS>>") || tmpl == "mistral";
- // [variant] space before + after response
- bool space_around_response = tmpl_contains("' ' + eos_token");
- // [variant] add BOS inside history
- bool add_bos_inside_history = tmpl_contains("bos_token + '[INST]");
- // [variant] trim spaces from the input message
- bool strip_message = tmpl_contains("content.strip()");
- // construct the prompt
- bool is_inside_turn = true; // skip BOS at the beginning
- ss << "[INST] ";
- for (auto message : chat) {
- std::string content = strip_message ? trim(message->content) : message->content;
- std::string role(message->role);
- if (!is_inside_turn) {
- is_inside_turn = true;
- ss << (add_bos_inside_history ? "<s>[INST] " : "[INST] ");
- }
- if (role == "system") {
- if (support_system_message) {
- ss << "<<SYS>>\n" << content << "\n<</SYS>>\n\n";
- } else {
- // if the model does not support system message, we still include it in the first message, but without <<SYS>>
- ss << content << "\n";
- }
- } else if (role == "user") {
- ss << content << " [/INST]";
- } else {
- ss << (space_around_response ? " " : "") << content << (space_around_response ? " " : "") << "</s>";
- is_inside_turn = false;
- }
- }
- // llama2 templates seem to not care about "add_generation_prompt"
- } else if (tmpl == "phi3" || (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>"))) {
- // Phi 3
- for (auto message : chat) {
- std::string role(message->role);
- ss << "<|" << role << "|>\n" << message->content << "<|end|>\n";
- }
- if (add_ass) {
- ss << "<|assistant|>\n";
- }
- } else if (tmpl == "zephyr" || tmpl_contains("<|user|>")) {
- // zephyr template
- for (auto message : chat) {
- ss << "<|" << message->role << "|>" << "\n" << message->content << "<|endoftext|>\n";
- }
- if (add_ass) {
- ss << "<|assistant|>\n";
- }
- } else if (tmpl == "monarch" || tmpl_contains("bos_token + message['role']")) {
- // mlabonne/AlphaMonarch-7B template (the <s> is included inside history)
- for (auto message : chat) {
- std::string bos = (message == chat.front()) ? "" : "<s>"; // skip BOS for first message
- ss << bos << message->role << "\n" << message->content << "</s>\n";
- }
- if (add_ass) {
- ss << "<s>assistant\n";
- }
- } else if (tmpl == "gemma" || tmpl == "gemma2" || tmpl_contains("<start_of_turn>")) {
- // google/gemma-7b-it
- std::string system_prompt = "";
- for (auto message : chat) {
- std::string role(message->role);
- if (role == "system") {
- // there is no system message for gemma, but we will merge it with user prompt, so nothing is broken
- system_prompt = trim(message->content);
- continue;
- }
- // in gemma, "assistant" is "model"
- role = role == "assistant" ? "model" : message->role;
- ss << "<start_of_turn>" << role << "\n";
- if (!system_prompt.empty() && role != "model") {
- ss << system_prompt << "\n\n";
- system_prompt = "";
- }
- ss << trim(message->content) << "<end_of_turn>\n";
- }
- if (add_ass) {
- ss << "<start_of_turn>model\n";
- }
- } else if (tmpl == "orion" || tmpl_contains("'\\n\\nAssistant: ' + eos_token")) {
- // OrionStarAI/Orion-14B-Chat
- std::string system_prompt = "";
- for (auto message : chat) {
- std::string role(message->role);
- if (role == "system") {
- // there is no system message support, we will merge it with user prompt
- system_prompt = message->content;
- continue;
- } else if (role == "user") {
- ss << "Human: ";
- if (!system_prompt.empty()) {
- ss << system_prompt << "\n\n";
- system_prompt = "";
- }
- ss << message->content << "\n\nAssistant: </s>";
- } else {
- ss << message->content << "</s>";
- }
- }
- } else if (tmpl == "openchat" || tmpl_contains("GPT4 Correct ")) {
- // openchat/openchat-3.5-0106,
- for (auto message : chat) {
- std::string role(message->role);
- if (role == "system") {
- ss << message->content << "<|end_of_turn|>";
- } else {
- role[0] = toupper(role[0]);
- ss << "GPT4 Correct " << role << ": " << message->content << "<|end_of_turn|>";
- }
- }
- if (add_ass) {
- ss << "GPT4 Correct Assistant:";
- }
- } else if (tmpl == "vicuna" || tmpl == "vicuna-orca" || (tmpl_contains("USER: ") && tmpl_contains("ASSISTANT: "))) {
- // eachadea/vicuna-13b-1.1 (and Orca variant)
- for (auto message : chat) {
- std::string role(message->role);
- if (role == "system") {
- // Orca-Vicuna variant uses a system prefix
- if (tmpl == "vicuna-orca" || tmpl_contains("SYSTEM: ")) {
- ss << "SYSTEM: " << message->content << "\n";
- } else {
- ss << message->content << "\n\n";
- }
- } else if (role == "user") {
- ss << "USER: " << message->content << "\n";
- } else if (role == "assistant") {
- ss << "ASSISTANT: " << message->content << "</s>\n";
- }
- }
- if (add_ass) {
- ss << "ASSISTANT:";
- }
- } else if (tmpl == "deepseek" || (tmpl_contains("### Instruction:") && tmpl_contains("<|EOT|>"))) {
- // deepseek-ai/deepseek-coder-33b-instruct
- for (auto message : chat) {
- std::string role(message->role);
- if (role == "system") {
- ss << message->content;
- } else if (role == "user") {
- ss << "### Instruction:\n" << message->content << "\n";
- } else if (role == "assistant") {
- ss << "### Response:\n" << message->content << "\n<|EOT|>\n";
- }
- }
- if (add_ass) {
- ss << "### Response:\n";
- }
- } else if (tmpl == "command-r" || (tmpl_contains("<|START_OF_TURN_TOKEN|>") && tmpl_contains("<|USER_TOKEN|>"))) {
- // CohereForAI/c4ai-command-r-plus
- for (auto message : chat) {
- std::string role(message->role);
- if (role == "system") {
- ss << "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
- } else if (role == "user") {
- ss << "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
- } else if (role == "assistant") {
- ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
- }
- }
- if (add_ass) {
- ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>";
- }
- } else if (tmpl == "llama3" || (tmpl_contains("<|start_header_id|>") && tmpl_contains("<|end_header_id|>"))) {
- // Llama 3
- for (auto message : chat) {
- std::string role(message->role);
- ss << "<|start_header_id|>" << role << "<|end_header_id|>\n\n" << trim(message->content) << "<|eot_id|>";
- }
- if (add_ass) {
- ss << "<|start_header_id|>assistant<|end_header_id|>\n\n";
- }
- } else if (tmpl == "chatglm3" || tmpl_contains("[gMASK]sop")) {
- // chatglm3-6b
- ss << "[gMASK]" << "sop";
- for (auto message : chat) {
- std::string role(message->role);
- ss << "<|" << role << "|>" << "\n " << message->content;
- }
- if (add_ass) {
- ss << "<|assistant|>";
- }
- } else if (tmpl == "chatglm4" || tmpl_contains("[gMASK]<sop>")) {
- ss << "[gMASK]" << "<sop>";
- for (auto message : chat) {
- std::string role(message->role);
- ss << "<|" << role << "|>" << "\n" << message->content;
- }
- if (add_ass) {
- ss << "<|assistant|>";
- }
- } else if (tmpl == "minicpm" || tmpl_contains(LU8("<用户>"))) {
- // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
- for (auto message : chat) {
- std::string role(message->role);
- if (role == "user") {
- ss << LU8("<用户>");
- ss << trim(message->content);
- ss << "<AI>";
- } else {
- ss << trim(message->content);
- }
- }
- } else if (tmpl == "deepseek2" || tmpl_contains("'Assistant: ' + message['content'] + eos_token")) {
- // DeepSeek-V2
- for (auto message : chat) {
- std::string role(message->role);
- if (role == "system") {
- ss << message->content << "\n\n";
- } else if (role == "user") {
- ss << "User: " << message->content << "\n\n";
- } else if (role == "assistant") {
- ss << "Assistant: " << message->content << LU8("<|end▁of▁sentence|>");
- }
- }
- if (add_ass) {
- ss << "Assistant:";
- }
- } else if (tmpl == "exaone3" || (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]"))) {
- // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
- // EXAONE-3.0-7.8B-Instruct
- for (auto message : chat) {
- std::string role(message->role);
- if (role == "system") {
- ss << "[|system|]" << trim(message->content) << "[|endofturn|]\n";
- } else if (role == "user") {
- ss << "[|user|]" << trim(message->content) << "\n";
- } else if (role == "assistant") {
- ss << "[|assistant|]" << trim(message->content) << "[|endofturn|]\n";
- }
- }
- if (add_ass) {
- ss << "[|assistant|]";
- }
- } else {
- // template not supported
- return -1;
- }
- dest = ss.str();
- return dest.size();
- }
- int32_t llama_chat_apply_template(
- const struct llama_model * model,
- const char * tmpl,
- const struct llama_chat_message * chat,
- size_t n_msg,
- bool add_ass,
- char * buf,
- int32_t length) {
- std::string curr_tmpl(tmpl == nullptr ? "" : tmpl);
- if (tmpl == nullptr) {
- GGML_ASSERT(model != nullptr);
- // load template from model
- std::vector<char> model_template(2048, 0); // longest known template is about 1200 bytes
- std::string template_key = "tokenizer.chat_template";
- int32_t res = llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size());
- if (res < 0) {
- // worst case: there is no template information in the model metadata, so fall back to chatml by default
- curr_tmpl = "chatml"; // see llama_chat_apply_template_internal
- } else {
- curr_tmpl = std::string(model_template.data()); // the buffer is null-terminated; avoid copying the trailing zero padding
- }
- }
- // format the chat to string
- std::vector<const llama_chat_message *> chat_vec;
- chat_vec.resize(n_msg);
- for (size_t i = 0; i < n_msg; i++) {
- chat_vec[i] = &chat[i];
- }
- std::string formatted_chat;
- int32_t res = llama_chat_apply_template_internal(curr_tmpl, chat_vec, formatted_chat, add_ass);
- if (res < 0) {
- return res;
- }
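- // copy into the caller's buffer; if the formatted chat does not fit, strncpy
- // truncates (without a terminating null) and the caller can retry using the
- // returned length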
- if (buf && length > 0) {
- strncpy(buf, formatted_chat.c_str(), length);
- }
- return res;
- }
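- // Example (sketch, not part of the upstream API): formatting a short conversation
- // with the model's embedded chat template. Passing tmpl == nullptr selects the
- // template stored in the GGUF metadata; the return value is the length the
- // formatted chat requires, so the call is retried if the first buffer was too
- // small. The helper name, messages and buffer size below are illustrative.
- static std::string llama_chat_apply_template_example(const struct llama_model * model) {
- const llama_chat_message msgs[] = {
- { "system", "You are a helpful assistant." },
- { "user", "Hello!" },
- };
- std::vector<char> buf(1024);
- int32_t res = llama_chat_apply_template(model, nullptr, msgs, 2, true, buf.data(), (int32_t) buf.size());
- if (res > (int32_t) buf.size()) {
- // buffer too small: resize to the reported length and format again
- buf.resize(res);
- res = llama_chat_apply_template(model, nullptr, msgs, 2, true, buf.data(), (int32_t) buf.size());
- }
- return res < 0 ? std::string() : std::string(buf.data(), res);
- }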
- //
- // sampling
- //
- // TODO: remove indirection when vocab becomes accessible in llama-sampling.cpp
- struct llama_sampler * llama_sampler_init_grammar(const struct llama_model * model, const char * grammar_str, const char * grammar_root) {
- return llama_sampler_init_grammar_impl(model->vocab, grammar_str, grammar_root);
- }
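- // Example (sketch): building a grammar sampler that constrains generation to a
- // yes/no answer. The GBNF string and helper name are illustrative only.
- static struct llama_sampler * llama_sampler_init_yes_no_example(const struct llama_model * model) {
- // GBNF: the root rule accepts exactly "yes" or "no"
- return llama_sampler_init_grammar(model, "root ::= \"yes\" | \"no\"", "root");
- }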
- //
- // model split
- //
- int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count) {
- static const char * const SPLIT_PATH_FORMAT = "%s-%05d-of-%05d.gguf";
- if (snprintf(split_path, maxlen, SPLIT_PATH_FORMAT, path_prefix, split_no + 1, split_count)) {
- return strlen(split_path);
- }
- return 0;
- }
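- // Example (sketch): for path_prefix "/models/mixtral", split_no 0 and split_count 4,
- // the format above yields "/models/mixtral-00001-of-00004.gguf".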
- int llama_split_prefix(char * dest, size_t maxlen, const char * split_path, int split_no, int split_count) {
- std::string str_split_path(split_path);
- char postfix[32];
- snprintf(postfix, 32, "-%05d-of-%05d.gguf", split_no + 1, split_count);
- std::string str_postfix(postfix);
- // check if split_path ends with the postfix
- int size_prefix = str_split_path.size() - str_postfix.size();
- if (size_prefix > 0 && str_split_path.find(str_postfix, size_prefix) != std::string::npos) {
- snprintf(dest, std::min((size_t) size_prefix + 1, maxlen), "%s", split_path);
- return size_prefix;
- }
- return 0;
- }
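- // Example (sketch): recovering the path prefix from the first shard of a 4-way
- // split; the file name is illustrative. On success `prefix` holds "/models/mixtral"
- // and the prefix length is returned, otherwise the function returns 0.
- static void llama_split_prefix_example() {
- char prefix[256];
- const int n = llama_split_prefix(prefix, sizeof(prefix), "/models/mixtral-00001-of-00004.gguf", 0, 4);
- (void) n;
- }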
- const char * llama_print_system_info(void) {
- static std::string s;
- s = "";
- s += "AVX = " + std::to_string(ggml_cpu_has_avx()) + " | ";
- s += "AVX_VNNI = " + std::to_string(ggml_cpu_has_avx_vnni()) + " | ";
- s += "AVX2 = " + std::to_string(ggml_cpu_has_avx2()) + " | ";
- s += "AVX512 = " + std::to_string(ggml_cpu_has_avx512()) + " | ";
- s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | ";
- s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | ";
- s += "AVX512_BF16 = " + std::to_string(ggml_cpu_has_avx512_bf16()) + " | ";
- s += "FMA = " + std::to_string(ggml_cpu_has_fma()) + " | ";
- s += "NEON = " + std::to_string(ggml_cpu_has_neon()) + " | ";
- s += "SVE = " + std::to_string(ggml_cpu_has_sve()) + " | ";
- s += "ARM_FMA = " + std::to_string(ggml_cpu_has_arm_fma()) + " | ";
- s += "F16C = " + std::to_string(ggml_cpu_has_f16c()) + " | ";
- s += "FP16_VA = " + std::to_string(ggml_cpu_has_fp16_va()) + " | ";
- s += "RISCV_VECT = " + std::to_string(ggml_cpu_has_riscv_v()) + " | ";
- s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | ";
- s += "BLAS = " + std::to_string(ggml_cpu_has_blas()) + " | ";
- s += "SSE3 = " + std::to_string(ggml_cpu_has_sse3()) + " | ";
- s += "SSSE3 = " + std::to_string(ggml_cpu_has_ssse3()) + " | ";
- s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | ";
- s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";
- s += "LLAMAFILE = " + std::to_string(ggml_cpu_has_llamafile()) + " | ";
- return s.c_str();
- }
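- // Usage note (sketch): the returned pointer refers to a function-local static
- // string that is rebuilt on every call, so callers should print or copy it
- // immediately, e.g. fprintf(stderr, "system info: %s\n", llama_print_system_info());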
- struct llama_perf_context_data llama_perf_context(const struct llama_context * ctx) {
- struct llama_perf_context_data data = {};
- if (ctx == nullptr) {
- return data;
- }
- data.t_start_ms = 1e-3 * ctx->t_start_us;
- data.t_load_ms = 1e-3 * ctx->t_load_us;
- data.t_p_eval_ms = 1e-3 * ctx->t_p_eval_us;
- data.t_eval_ms = 1e-3 * ctx->t_eval_us;
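- // clamp the token counts to at least 1 so the per-token averages computed in
- // llama_perf_context_print never divide by zero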
- data.n_p_eval = std::max(1, ctx->n_p_eval);
- data.n_eval = std::max(1, ctx->n_eval);
- return data;
- }
- void llama_perf_context_print(const struct llama_context * ctx) {
- const auto data = llama_perf_context(ctx);
- const double t_end_ms = 1e-3 * ggml_time_us();
- LLAMA_LOG_INFO("%s: load time = %10.2f ms\n", __func__, data.t_load_ms);
- LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
- __func__, data.t_p_eval_ms, data.n_p_eval, data.t_p_eval_ms / data.n_p_eval, 1e3 / data.t_p_eval_ms * data.n_p_eval);
- LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
- __func__, data.t_eval_ms, data.n_eval, data.t_eval_ms / data.n_eval, 1e3 / data.t_eval_ms * data.n_eval);
- LLAMA_LOG_INFO("%s: total time = %10.2f ms / %5d tokens\n", __func__, (t_end_ms - data.t_start_ms), (data.n_p_eval + data.n_eval));
- }
- void llama_perf_context_reset(struct llama_context * ctx) {
- ctx->t_start_us = ggml_time_us();
- ctx->t_eval_us = ctx->n_eval = 0;
- ctx->t_p_eval_us = ctx->n_p_eval = 0;
- }
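- // Example (sketch): typical timing pattern around a generation loop,
- //   llama_perf_context_reset(ctx);
- //   ... llama_decode() / sampling loop ...
- //   llama_perf_context_print(ctx);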
- void llama_perf_dump_yaml(FILE * stream, const llama_context * ctx) {
- fprintf(stream, "\n");
- fprintf(stream, "###########\n");
- fprintf(stream, "# Timings #\n");
- fprintf(stream, "###########\n");
- fprintf(stream, "\n");
- fprintf(stream, "mst_eval: %.2f # ms / token during generation\n",
- 1.0e-3 * ctx->t_eval_us / ctx->n_eval);
- fprintf(stream, "mst_p_eval: %.2f # ms / token during prompt processing\n",
- 1.0e-3 * ctx->t_p_eval_us / ctx->n_p_eval);
- fprintf(stream, "n_eval: %d # number of tokens generated (excluding the first one)\n", ctx->n_eval);
- fprintf(stream, "n_p_eval: %d # number of tokens processed in batches at the beginning\n", ctx->n_p_eval);
- fprintf(stream, "t_eval_us: %" PRId64 " # total microseconds spent generating tokens\n", ctx->t_eval_us);
- fprintf(stream, "t_load_us: %" PRId64 " # total microseconds spent loading the model\n", ctx->t_load_us);
- fprintf(stream, "t_p_eval_us: %" PRId64 " # total microseconds spent prompt processing\n", ctx->t_p_eval_us);
- fprintf(stream, "ts_eval: %.2f # tokens / second during generation\n",
- 1.0e6 * ctx->n_eval / ctx->t_eval_us);
- fprintf(stream, "ts_p_eval: %.2f # tokens / second during prompt processing\n",
- 1.0e6 * ctx->n_p_eval / ctx->t_p_eval_us);
- }
- // For internal test use
- const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
- struct llama_context * ctx
- ) {
- return ctx->model.tensors_by_name;
- }
- void llama_log_set(ggml_log_callback log_callback, void * user_data) {
- g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
- g_state.log_callback_user_data = user_data;
- #ifdef GGML_USE_METAL
- ggml_backend_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
- #elif defined(GGML_USE_CUDA)
- ggml_backend_cuda_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
- #elif defined(GGML_USE_CANN)
- ggml_backend_cann_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
- #endif
- }
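- // Example (sketch): routing llama.cpp log output through a user callback; the
- // callback below is illustrative and simply drops everything except errors.
- static void llama_log_errors_only_example(ggml_log_level level, const char * text, void * /* user_data */) {
- if (level == GGML_LOG_LEVEL_ERROR) {
- fputs(text, stderr);
- fflush(stderr);
- }
- }
- // usage: llama_log_set(llama_log_errors_only_example, nullptr);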
- static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
- va_list args_copy;
- va_copy(args_copy, args);
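- // format into a small stack buffer first; if the message does not fit, retry
- // with a heap allocation sized from the vsnprintf return value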
- char buffer[128];
- int len = vsnprintf(buffer, 128, format, args);
- if (len < 128) {
- g_state.log_callback(level, buffer, g_state.log_callback_user_data);
- } else {
- char * buffer2 = new char[len + 1];
- vsnprintf(buffer2, len + 1, format, args_copy);
- buffer2[len] = 0;
- g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
- delete[] buffer2;
- }
- va_end(args_copy);
- }
- void llama_log_internal(ggml_log_level level, const char * format, ...) {
- va_list args;
- va_start(args, format);
- llama_log_internal_v(level, format, args);
- va_end(args);
- }
- void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) {
- (void) level;
- (void) user_data;
- fputs(text, stderr);
- fflush(stderr);
- }