ggml-quants.c

/**
 * llama.cpp - commit 40c6d79fb52f995f47507fedfeaae2ac05d9b35c - do not edit this file
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define GGML_COMMON_IMPL_C
#include "ggml-common.h"

#include "ggml-quants.h"
#include "ggml-impl.h"
#include "ggml-cpu-impl.h"
#include "ggml-cpu.h"

#include <math.h>
#include <string.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h> // for qsort
#include <stdio.h>  // for GGML_ASSERT

#define GROUP_MAX_EPS 1e-15f
#define GROUP_MAX_EPS_IQ3_XXS 1e-8f
#define GROUP_MAX_EPS_IQ2_S 1e-8f
#define GROUP_MAX_EPS_IQ1_M 1e-7f
#define GROUP_MAX_EPS_IQ1_S 1e-12f

#if defined(_MSC_VER)
// disable "possible loss of data" to avoid warnings for hundreds of casts
// we should just be careful :)
#pragma warning(disable: 4244 4267)
#endif

#define UNUSED GGML_UNUSED
// reference implementation for deterministic creation of model files
void quantize_row_q4_0_ref(const float * restrict x, block_q4_0 * restrict y, int64_t k) {
    static const int qk = QK4_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        float max  = 0.0f;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (amax < fabsf(v)) {
                amax = fabsf(v);
                max  = v;
            }
        }

        const float d  = max / -8;
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = x[i*qk + 0    + j]*id;
            const float x1 = x[i*qk + qk/2 + j]*id;

            const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
            const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));

            y[i].qs[j]  = xi0;
            y[i].qs[j] |= xi1 << 4;
        }
    }
}

void quantize_row_q4_1_ref(const float * restrict x, block_q4_1 * restrict y, int64_t k) {
    const int qk = QK4_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float min = FLT_MAX;
        float max = -FLT_MAX;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];

            if (v < min) min = v;
            if (v > max) max = v;
        }

        const float d  = (max - min) / ((1 << 4) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);
        y[i].m = GGML_FP32_TO_FP16(min);

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = (x[i*qk + 0    + j] - min)*id;
            const float x1 = (x[i*qk + qk/2 + j] - min)*id;

            const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
            const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));

            y[i].qs[j]  = xi0;
            y[i].qs[j] |= xi1 << 4;
        }
    }
}
void quantize_row_q5_0_ref(const float * restrict x, block_q5_0 * restrict y, int64_t k) {
    static const int qk = QK5_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max
        float max  = 0.0f;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];
            if (amax < fabsf(v)) {
                amax = fabsf(v);
                max  = v;
            }
        }

        const float d  = max / -16;
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        uint32_t qh = 0;

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = x[i*qk + 0    + j]*id;
            const float x1 = x[i*qk + qk/2 + j]*id;

            const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
            const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));

            y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);

            // get the 5-th bit and store it in qh at the right position
            qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
            qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
        }

        memcpy(&y[i].qh, &qh, sizeof(qh));
    }
}

void quantize_row_q5_1_ref(const float * restrict x, block_q5_1 * restrict y, int64_t k) {
    const int qk = QK5_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float min = FLT_MAX;
        float max = -FLT_MAX;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];

            if (v < min) min = v;
            if (v > max) max = v;
        }

        const float d  = (max - min) / ((1 << 5) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);
        y[i].m = GGML_FP32_TO_FP16(min);

        uint32_t qh = 0;

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = (x[i*qk + 0    + j] - min)*id;
            const float x1 = (x[i*qk + qk/2 + j] - min)*id;

            const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
            const uint8_t xi1 = (uint8_t)(x1 + 0.5f);

            y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);

            // get the 5-th bit and store it in qh at the right position
            qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
            qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
        }

        memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
    }
}
// reference implementation for deterministic creation of model files
void quantize_row_q8_0_ref(const float * restrict x, block_q8_0 * restrict y, int64_t k) {
    assert(k % QK8_0 == 0);
    const int nb = k / QK8_0;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max

        for (int j = 0; j < QK8_0; j++) {
            const float v = x[i*QK8_0 + j];
            amax = MAX(amax, fabsf(v));
        }

        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        for (int j = 0; j < QK8_0; ++j) {
            const float x0 = x[i*QK8_0 + j]*id;

            y[i].qs[j] = roundf(x0);
        }
    }
}

// reference implementation for deterministic creation of model files
void quantize_row_q8_1_ref(const float * restrict x, block_q8_1 * restrict y, int64_t k) {
    assert(QK8_1 == 32);
    assert(k % QK8_1 == 0);
    const int nb = k / QK8_1;

    for (int i = 0; i < nb; i++) {
        float amax = 0.0f; // absolute max

        for (int j = 0; j < QK8_1; j++) {
            const float v = x[i*QK8_1 + j];
            amax = MAX(amax, fabsf(v));
        }

        const float d  = amax / ((1 << 7) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);

        int sum = 0;

        for (int j = 0; j < QK8_1/2; ++j) {
            const float v0 = x[i*QK8_1           + j]*id;
            const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;

            y[i].qs[          j] = roundf(v0);
            y[i].qs[QK8_1/2 + j] = roundf(v1);

            sum += y[i].qs[          j];
            sum += y[i].qs[QK8_1/2 + j];
        }

        y[i].s = GGML_FP32_TO_FP16(sum*d);
    }
}
void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int64_t k) {
    static const int qk = QK4_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);

        for (int j = 0; j < qk/2; ++j) {
            const int x0 = (x[i].qs[j] & 0x0F) - 8;
            const int x1 = (x[i].qs[j] >>   4) - 8;

            y[i*qk + j + 0   ] = x0*d;
            y[i*qk + j + qk/2] = x1*d;
        }
    }
}

void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int64_t k) {
    static const int qk = QK4_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float m = GGML_FP16_TO_FP32(x[i].m);

        for (int j = 0; j < qk/2; ++j) {
            const int x0 = (x[i].qs[j] & 0x0F);
            const int x1 = (x[i].qs[j] >>   4);

            y[i*qk + j + 0   ] = x0*d + m;
            y[i*qk + j + qk/2] = x1*d + m;
        }
    }
}

void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int64_t k) {
    static const int qk = QK5_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);

        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;

            const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
            const int32_t x1 = ((x[i].qs[j] >>   4) | xh_1) - 16;

            y[i*qk + j + 0   ] = x0*d;
            y[i*qk + j + qk/2] = x1*d;
        }
    }
}

void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int64_t k) {
    static const int qk = QK5_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float m = GGML_FP16_TO_FP32(x[i].m);

        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;

            const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
            const int x1 = (x[i].qs[j] >>   4) | xh_1;

            y[i*qk + j + 0   ] = x0*d + m;
            y[i*qk + j + qk/2] = x1*d + m;
        }
    }
}

void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int64_t k) {
    static const int qk = QK8_0;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);

        for (int j = 0; j < qk; ++j) {
            y[i*qk + j] = x[i].qs[j]*d;
        }
    }
}
//
// 2-6 bit quantization in super-blocks
//

//
// ===================== Helper functions
//
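// round-to-nearest via the float representation: adding 12582912.0f (= 1.5 * 2^23)
// moves any |fval| <= 4194303 into a range where the float's unit-in-the-last-place is 1,
// so the hardware rounds to the nearest integer and the result can be read back from
// the low 23 mantissa bits (offset by 0x00400000)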
static inline int nearest_int(float fval) {
    assert(fabsf(fval) <= 4194303.f);
    float val = fval + 12582912.f;
    int i; memcpy(&i, &val, sizeof(int));
    return (i & 0x007fffff) - 0x00400000;
}
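// quantize x[0..n-1] to signed values in [-nmax, nmax-1] (stored in L with an offset of +nmax)
// and return the block scale; for rmse_type != 0 the scale is refined by a weighted
// least-squares fit (weights from qw, or derived from x according to rmse_type) plus a
// small grid search over candidate scales around -nmax/max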
static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, int rmse_type,
        const float * restrict qw) {
    float max = 0;
    float amax = 0;
    for (int i = 0; i < n; ++i) {
        float ax = fabsf(x[i]);
        if (ax > amax) { amax = ax; max = x[i]; }
    }
    if (amax < GROUP_MAX_EPS) { // all zero
        for (int i = 0; i < n; ++i) {
            L[i] = 0;
        }
        return 0.f;
    }
    float iscale = -nmax / max;
    if (rmse_type == 0) {
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale * x[i]);
            L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
        }
        return 1/iscale;
    }
    bool return_early = false;
    if (rmse_type < 0) {
        rmse_type = -rmse_type;
        return_early = true;
    }
    float sumlx = 0;
    float suml2 = 0;
#ifdef HAVE_BUGGY_APPLE_LINKER
    // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
    for (volatile int i = 0; i < n; ++i) {
#else
    for (int i = 0; i < n; ++i) {
#endif
        int l = nearest_int(iscale * x[i]);
        l = MAX(-nmax, MIN(nmax-1, l));
        L[i] = l + nmax;
        float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i]));
        sumlx += w*x[i]*l;
        suml2 += w*l*l;
    }
    float scale = suml2 ? sumlx/suml2 : 0.0f;
    if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale;
    float best = scale * sumlx;
    for (int is = -9; is <= 9; ++is) {
        if (is == 0) {
            continue;
        }
        iscale = -(nmax + 0.1f*is) / max;
        sumlx = suml2 = 0;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale * x[i]);
            l = MAX(-nmax, MIN(nmax-1, l));
            float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i]));
            sumlx += w*x[i]*l;
            suml2 += w*l*l;
        }
        if (suml2 > 0 && sumlx*sumlx > best*suml2) {
            for (int i = 0; i < n; ++i) {
                int l = nearest_int(iscale * x[i]);
                L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
            }
            scale = sumlx/suml2; best = scale*sumlx;
        }
    }
    return scale;
}
static float make_q3_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, bool do_rmse) {
    float max = 0;
    float amax = 0;
    for (int i = 0; i < n; ++i) {
        float ax = fabsf(x[i]);
        if (ax > amax) { amax = ax; max = x[i]; }
    }
    if (amax < GROUP_MAX_EPS) { // all zero
        for (int i = 0; i < n; ++i) { L[i] = 0; }
        return 0.f;
    }
    float iscale = -nmax / max;
    if (do_rmse) {
        float sumlx = 0;
        float suml2 = 0;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale * x[i]);
            l = MAX(-nmax, MIN(nmax-1, l));
            L[i] = l;
            float w = x[i]*x[i];
            sumlx += w*x[i]*l;
            suml2 += w*l*l;
        }
        for (int itry = 0; itry < 5; ++itry) {
            int n_changed = 0;
            for (int i = 0; i < n; ++i) {
                float w = x[i]*x[i];
                float slx = sumlx - w*x[i]*L[i];
                if (slx > 0) {
                    float sl2 = suml2 - w*L[i]*L[i];
                    int new_l = nearest_int(x[i] * sl2 / slx);
                    new_l = MAX(-nmax, MIN(nmax-1, new_l));
                    if (new_l != L[i]) {
                        slx += w*x[i]*new_l;
                        sl2 += w*new_l*new_l;
                        if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) {
                            L[i] = new_l; sumlx = slx; suml2 = sl2;
                            ++n_changed;
                        }
                    }
                }
            }
            if (!n_changed) {
                break;
            }
        }
        for (int i = 0; i < n; ++i) {
            L[i] += nmax;
        }
        return sumlx / suml2;
    }
    for (int i = 0; i < n; ++i) {
        int l = nearest_int(iscale * x[i]);
        l = MAX(-nmax, MIN(nmax-1, l));
        L[i] = l + nmax;
    }
    return 1/iscale;
}
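// quantize x[0..n-1] to unsigned values in [0, nmax] with an offset ("min"):
// alternately refines the scale (weighted least squares over the current quants)
// and the min (blended with the mean residual using factor alpha) for up to ntry rounds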
static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min,
        int ntry, float alpha) {
    float min = x[0];
    float max = x[0];
    for (int i = 1; i < n; ++i) {
        if (x[i] < min) min = x[i];
        if (x[i] > max) max = x[i];
    }
    if (max == min) {
        for (int i = 0; i < n; ++i) L[i] = 0;
        *the_min = 0;
        return 0.f;
    }
    if (min > 0) min = 0;
    float iscale = nmax/(max - min);
    float scale = 1/iscale;
    for (int itry = 0; itry < ntry; ++itry) {
        float sumlx = 0; int suml2 = 0;
        bool did_change = false;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale*(x[i] - min));
            l = MAX(0, MIN(nmax, l));
            if (l != L[i]) {
                L[i] = l;
                did_change = true;
            }
            sumlx += (x[i] - min)*l;
            suml2 += l*l;
        }
        scale = sumlx/suml2;
        float sum = 0;
        for (int i = 0; i < n; ++i) {
            sum += x[i] - scale*L[i];
        }
        min = alpha*min + (1 - alpha)*sum/n;
        if (min > 0) min = 0;
        iscale = 1/scale;
        if (!did_change) break;
    }
    *the_min = -min;
    return scale;
}
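// weighted (scale, min) fit for unsigned quants in [0, nmax]: tries nstep+1 candidate
// inverse scales (rmin + rdelta*is + nmax)/(max - min), solves the 2x2 weighted
// least-squares system for scale and min at each step, and keeps the candidate with
// the lowest weighted error (MAD or squared error, depending on use_mad)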
static float make_qkx2_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
        uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
        float rmin, float rdelta, int nstep, bool use_mad) {
    float min = x[0];
    float max = x[0];
    float sum_w = weights[0];
    float sum_x = sum_w * x[0];
#ifdef HAVE_BUGGY_APPLE_LINKER
    // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
    for (volatile int i = 1; i < n; ++i) {
#else
    for (int i = 1; i < n; ++i) {
#endif
        if (x[i] < min) min = x[i];
        if (x[i] > max) max = x[i];
        float w = weights[i];
        sum_w += w;
        sum_x += w * x[i];
    }
    if (min > 0) min = 0;
    if (max == min) {
        for (int i = 0; i < n; ++i) L[i] = 0;
        *the_min = -min;
        return 0.f;
    }
    float iscale = nmax/(max - min);
    float scale = 1/iscale;
    float best_mad = 0;
    for (int i = 0; i < n; ++i) {
        int l = nearest_int(iscale*(x[i] - min));
        L[i] = MAX(0, MIN(nmax, l));
        float diff = scale * L[i] + min - x[i];
        diff = use_mad ? fabsf(diff) : diff * diff;
        float w = weights[i];
        best_mad += w * diff;
    }
    if (nstep < 1) {
        *the_min = -min;
        return scale;
    }
    for (int is = 0; is <= nstep; ++is) {
        iscale = (rmin + rdelta*is + nmax)/(max - min);
        float sum_l = 0, sum_l2 = 0, sum_xl = 0;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale*(x[i] - min));
            l = MAX(0, MIN(nmax, l));
            Laux[i] = l;
            float w = weights[i];
            sum_l += w*l;
            sum_l2 += w*l*l;
            sum_xl += w*l*x[i];
        }
        float D = sum_w * sum_l2 - sum_l * sum_l;
        if (D > 0) {
            float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
            float this_min   = (sum_l2 * sum_x - sum_l * sum_xl)/D;
            if (this_min > 0) {
                this_min = 0;
                this_scale = sum_xl / sum_l2;
            }
            float mad = 0;
            for (int i = 0; i < n; ++i) {
                float diff = this_scale * Laux[i] + this_min - x[i];
                diff = use_mad ? fabsf(diff) : diff * diff;
                float w = weights[i];
                mad += w * diff;
            }
            if (mad < best_mad) {
                for (int i = 0; i < n; ++i) {
                    L[i] = Laux[i];
                }
                best_mad = mad;
                scale = this_scale;
                min = this_min;
            }
        }
    }
    *the_min = -min;
    return scale;
}
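// unpack the j-th 6-bit scale and 6-bit min from the 12-byte scales array of a k-quant
// super-block: for j < 4 they live in the low 6 bits of q[j] and q[j+4]; for j >= 4 the
// low 4 bits come from the two nibbles of q[j+4] and the high 2 bits are taken from the
// top 2 bits of q[j-4] (scale) and q[j] (min)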
static inline void get_scale_min_k4(int j, const uint8_t * restrict q, uint8_t * restrict d, uint8_t * restrict m) {
    if (j < 4) {
        *d = q[j] & 63; *m = q[j + 4] & 63;
    } else {
        *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
        *m = (q[j+4] >>  4) | ((q[j-0] >> 6) << 4);
    }
}
//========================- 2-bit (de)-quantization

void quantize_row_q2_K_ref(const float * restrict x, block_q2_K * restrict y, int64_t k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    uint8_t L[QK_K];
    uint8_t Laux[16];
    float   weights[16];
    float mins[QK_K/16];
    float scales[QK_K/16];

    const float q4scale = 15.f;

    for (int i = 0; i < nb; i++) {
        float max_scale = 0; // as we are deducting the min, scales are always positive
        float max_min = 0;
        for (int j = 0; j < QK_K/16; ++j) {
            for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]);
            scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true);
            float scale = scales[j];
            if (scale > max_scale) {
                max_scale = scale;
            }
            float min = mins[j];
            if (min > max_min) {
                max_min = min;
            }
        }

        if (max_scale > 0) {
            float iscale = q4scale/max_scale;
            for (int j = 0; j < QK_K/16; ++j) {
                int l = nearest_int(iscale*scales[j]);
                y[i].scales[j] = l;
            }
            y[i].d = GGML_FP32_TO_FP16(max_scale/q4scale);
        } else {
            for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0;
            y[i].d = GGML_FP32_TO_FP16(0.f);
        }
        if (max_min > 0) {
            float iscale = q4scale/max_min;
            for (int j = 0; j < QK_K/16; ++j) {
                int l = nearest_int(iscale*mins[j]);
                y[i].scales[j] |= (l << 4);
            }
            y[i].dmin = GGML_FP32_TO_FP16(max_min/q4scale);
        } else {
            y[i].dmin = GGML_FP32_TO_FP16(0.f);
        }
        for (int j = 0; j < QK_K/16; ++j) {
            const float d = GGML_FP16_TO_FP32(y[i].d) * (y[i].scales[j] & 0xF);
            if (!d) continue;
            const float dm = GGML_FP16_TO_FP32(y[i].dmin) * (y[i].scales[j] >> 4);
            for (int ii = 0; ii < 16; ++ii) {
                int l = nearest_int((x[16*j + ii] + dm)/d);
                l = MAX(0, MIN(3, l));
                L[16*j + ii] = l;
            }
        }

        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
            }
        }

        x += QK_K;
    }
}
void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int64_t k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float min = GGML_FP16_TO_FP32(x[i].dmin);

        const uint8_t * q = x[i].qs;

        int is = 0;
        float dl, ml;
        for (int n = 0; n < QK_K; n += 128) {
            int shift = 0;
            for (int j = 0; j < 4; ++j) {
                uint8_t sc = x[i].scales[is++];
                dl = d * (sc & 0xF); ml = min * (sc >> 4);
                for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml;

                sc = x[i].scales[is++];
                dl = d * (sc & 0xF); ml = min * (sc >> 4);
                for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml;

                shift += 2;
            }
            q += 32;
        }
    }
}
static float make_qkx3_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
        uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
        float rmin, float rdelta, int nstep, bool use_mad) {
    float min = x[0];
    float max = x[0];
    float sum_w = weights ? weights[0] : x[0]*x[0];
    float sum_x = sum_w * x[0];
#ifdef HAVE_BUGGY_APPLE_LINKER
    // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7
    for (volatile int i = 1; i < n; ++i) {
#else
    for (int i = 1; i < n; ++i) {
#endif
        if (x[i] < min) min = x[i];
        if (x[i] > max) max = x[i];
        float w = weights ? weights[i] : x[i]*x[i];
        sum_w += w;
        sum_x += w * x[i];
    }
    if (min > 0) {
        min = 0;
    }
    if (max <= min) {
        memset(L, 0, n);
        *the_min = -min;
        return 0.f;
    }
    float iscale = nmax/(max - min);
    float scale = 1/iscale;
    float best_mad = 0;
    for (int i = 0; i < n; ++i) {
        int l = nearest_int(iscale*(x[i] - min));
        L[i] = MAX(0, MIN(nmax, l));
        float diff = scale * L[i] + min - x[i];
        diff = use_mad ? fabsf(diff) : diff*diff;
        float w = weights ? weights[i] : x[i]*x[i];
        best_mad += w * diff;
    }
    if (nstep < 1) {
        *the_min = -min;
        return scale;
    }
    for (int is = 0; is <= nstep; ++is) {
        iscale = (rmin + rdelta*is + nmax)/(max - min);
        float sum_l = 0, sum_l2 = 0, sum_xl = 0;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale*(x[i] - min));
            l = MAX(0, MIN(nmax, l));
            Laux[i] = l;
            float w = weights ? weights[i] : x[i]*x[i];
            sum_l += w*l;
            sum_l2 += w*l*l;
            sum_xl += w*l*x[i];
        }
        float D = sum_w * sum_l2 - sum_l * sum_l;
        if (D > 0) {
            float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
            float this_min   = (sum_l2 * sum_x - sum_l * sum_xl)/D;
            if (this_min > 0) {
                this_min = 0;
                this_scale = sum_xl / sum_l2;
            }
            float mad = 0;
            for (int i = 0; i < n; ++i) {
                float diff = this_scale * Laux[i] + this_min - x[i];
                diff = use_mad ? fabsf(diff) : diff*diff;
                float w = weights ? weights[i] : x[i]*x[i];
                mad += w * diff;
            }
            if (mad < best_mad) {
                for (int i = 0; i < n; ++i) {
                    L[i] = Laux[i];
                }
                best_mad = mad;
                scale = this_scale;
                min = this_min;
            }
        }
    }
    *the_min = -min;
    return scale;
}
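// quantize non-negative values x[0..n-1] to [0, nmax]: a weighted-MSE search over a few
// candidate scales is followed by several rounds of greedy per-element refinement;
// the weights come from quant_weights and the returned value is the final scale sumlx/suml2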
static float make_qp_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, const float * quant_weights) {
    float max = 0;
    for (int i = 0; i < n; ++i) {
        max = MAX(max, x[i]);
    }
    if (!max) { // all zero
        for (int i = 0; i < n; ++i) { L[i] = 0; }
        return 0.f;
    }
    float iscale = nmax / max;
    for (int i = 0; i < n; ++i) {
        L[i] = nearest_int(iscale * x[i]);
    }
    float scale = 1/iscale;
    float best_mse = 0;
    for (int i = 0; i < n; ++i) {
        float diff = x[i] - scale*L[i];
        float w = quant_weights[i];
        best_mse += w*diff*diff;
    }
    for (int is = -4; is <= 4; ++is) {
        if (is == 0) continue;
        float iscale_is = (0.1f*is + nmax)/max;
        float scale_is = 1/iscale_is;
        float mse = 0;
        for (int i = 0; i < n; ++i) {
            int l = nearest_int(iscale_is*x[i]);
            l = MIN(nmax, l);
            float diff = x[i] - scale_is*l;
            float w = quant_weights[i];
            mse += w*diff*diff;
        }
        if (mse < best_mse) {
            best_mse = mse;
            iscale = iscale_is;
        }
    }
    float sumlx = 0;
    float suml2 = 0;
    for (int i = 0; i < n; ++i) {
        int l = nearest_int(iscale * x[i]);
        l = MIN(nmax, l);
        L[i] = l;
        float w = quant_weights[i];
        sumlx += w*x[i]*l;
        suml2 += w*l*l;
    }
    for (int itry = 0; itry < 5; ++itry) {
        int n_changed = 0;
        for (int i = 0; i < n; ++i) {
            float w = quant_weights[i];
            float slx = sumlx - w*x[i]*L[i];
            float sl2 = suml2 - w*L[i]*L[i];
            if (slx > 0 && sl2 > 0) {
                int new_l = nearest_int(x[i] * sl2 / slx);
                new_l = MIN(nmax, new_l);
                if (new_l != L[i]) {
                    slx += w*x[i]*new_l;
                    sl2 += w*new_l*new_l;
                    if (slx*slx*suml2 > sumlx*sumlx*sl2) {
                        L[i] = new_l; sumlx = slx; suml2 = sl2;
                        ++n_changed;
                    }
                }
            }
        }
        if (!n_changed) {
            break;
        }
    }
    return sumlx/suml2;
}
static void quantize_row_q2_K_impl(const float * restrict x, block_q2_K * restrict y, int k, const float * restrict quant_weights) {
    GGML_ASSERT(quant_weights);
    assert(k % QK_K == 0);
    const int nb = k / QK_K;
    const bool requantize = true;

    uint8_t L[QK_K];
    uint8_t Laux[16];
    float mins[QK_K/16];
    float scales[QK_K/16];
    float sw[QK_K/16];
    float weight[16];
    uint8_t Ls[QK_K/16], Lm[QK_K/16];

    for (int i = 0; i < nb; i++) {
        memset(sw, 0, QK_K/16*sizeof(float));
        float sumx2 = 0;
        for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j];
        float sigma2 = sumx2/QK_K;
        for (int j = 0; j < QK_K/16; ++j) {
            const float * restrict qw = quant_weights + QK_K * i + 16*j;
            for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j + l]*x[16*j + l]);
            for (int l = 0; l < QK_K/16; ++l) sw[j] += weight[l];
            scales[j] = make_qkx3_quants(16, 3, x + 16*j, weight, L + 16*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
        }

        float dm, mm;
        dm = make_qp_quants(QK_K/16, 15, scales, Ls, sw);
        mm = make_qp_quants(QK_K/16, 15, mins,   Lm, sw);

        y[i].d    = GGML_FP32_TO_FP16(dm);
        y[i].dmin = GGML_FP32_TO_FP16(mm);
        dm        = GGML_FP16_TO_FP32(y[i].d);
        mm        = GGML_FP16_TO_FP32(y[i].dmin);

        for (int j = 0; j < QK_K/16; ++j) {
            y[i].scales[j] = Ls[j] | (Lm[j] << 4);
        }

        if (requantize) {
            for (int j = 0; j < QK_K/16; ++j) {
                const float d = dm * (y[i].scales[j] & 0xF);
                if (!d) continue;
                const float m = mm * (y[i].scales[j] >> 4);
                for (int ii = 0; ii < 16; ++ii) {
                    int l = nearest_int((x[16*j + ii] + m)/d);
                    l = MAX(0, MIN(3, l));
                    L[16*j + ii] = l;
                }
            }
        }

        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
            }
        }

        x += QK_K;
    }
}
size_t quantize_q2_K(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
    size_t row_size = ggml_row_size(GGML_TYPE_Q2_K, n_per_row);
    if (!quant_weights) {
        quantize_row_q2_K_ref(src, dst, (int64_t)nrow*n_per_row);
    }
    else {
        char * qrow = (char *)dst;
        for (int64_t row = 0; row < nrow; ++row) {
            quantize_row_q2_K_impl(src, (block_q2_K*)qrow, n_per_row, quant_weights);
            src += n_per_row;
            qrow += row_size;
        }
    }
    return nrow * row_size;
}
//========================= 3-bit (de)-quantization

void quantize_row_q3_K_ref(const float * restrict x, block_q3_K * restrict y, int64_t k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    int8_t L[QK_K];
    float scales[QK_K / 16];

    for (int i = 0; i < nb; i++) {
        float max_scale = 0;
        float amax = 0;
        for (int j = 0; j < QK_K/16; ++j) {
            scales[j] = make_q3_quants(16, 4, x + 16*j, L + 16*j, true);
            float scale = fabsf(scales[j]);
            if (scale > amax) {
                amax = scale; max_scale = scales[j];
            }
        }

        memset(y[i].scales, 0, 12);
        if (max_scale) {
            float iscale = -32.f/max_scale;
            for (int j = 0; j < QK_K/16; ++j) {
                int8_t l = nearest_int(iscale*scales[j]);
                l = MAX(-32, MIN(31, l)) + 32;
                if (j < 8) {
                    y[i].scales[j] = l & 0xF;
                } else {
                    y[i].scales[j-8] |= ((l & 0xF) << 4);
                }
                l >>= 4;
                y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
            }
            y[i].d = GGML_FP32_TO_FP16(1/iscale);
        } else {
            y[i].d = GGML_FP32_TO_FP16(0.f);
        }

        int8_t sc;
        for (int j = 0; j < QK_K/16; ++j) {
            sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
            sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
            float d = GGML_FP16_TO_FP32(y[i].d) * sc;
            if (!d) {
                continue;
            }
            for (int ii = 0; ii < 16; ++ii) {
                int l = nearest_int(x[16*j + ii]/d);
                l = MAX(-4, MIN(3, l));
                L[16*j + ii] = l + 4;
            }
        }

        memset(y[i].hmask, 0, QK_K/8);
        // We put the high-bit for the 1st 8 quants into bit 0, the next 8 into bit 1, etc.
        int m = 0;
        uint8_t hm = 1;
        for (int j = 0; j < QK_K; ++j) {
            if (L[j] > 3) {
                y[i].hmask[m] |= hm;
                L[j] -= 4;
            }
            if (++m == QK_K/8) {
                m = 0; hm <<= 1;
            }
        }
        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
            }
        }

        x += QK_K;
    }
}
void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int64_t k) {
    assert(k % QK_K == 0);
    const int nb = k / QK_K;

    const uint32_t kmask1 = 0x03030303;
    const uint32_t kmask2 = 0x0f0f0f0f;

    uint32_t aux[4];
    const int8_t * scales = (const int8_t*)aux;

    for (int i = 0; i < nb; i++) {
        const float d_all = GGML_FP16_TO_FP32(x[i].d);

        const uint8_t * restrict q = x[i].qs;
        const uint8_t * restrict hm = x[i].hmask;
        uint8_t m = 1;

        memcpy(aux, x[i].scales, 12);
        uint32_t tmp = aux[2];
        aux[2] = ((aux[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
        aux[3] = ((aux[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
        aux[0] = (aux[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
        aux[1] = (aux[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);

        int is = 0;
        float dl;
        for (int n = 0; n < QK_K; n += 128) {
            int shift = 0;
            for (int j = 0; j < 4; ++j) {

                dl = d_all * (scales[is++] - 32);
                for (int l = 0; l < 16; ++l) {
                    *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4));
                }

                dl = d_all * (scales[is++] - 32);
                for (int l = 0; l < 16; ++l) {
                    *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 0 : 4));
                }

                shift += 2;
                m <<= 1;
            }
            q += 32;
        }
    }
}
static void quantize_row_q3_K_impl(const float * restrict x, block_q3_K * restrict y, int64_t n_per_row, const float * restrict quant_weights) {
    assert(n_per_row % QK_K == 0);
    const int nb = n_per_row / QK_K;

    int8_t L[QK_K];
    float scales[QK_K / 16];
    float weight[16];
    float sw[QK_K / 16];
    int8_t Ls[QK_K / 16];

    for (int i = 0; i < nb; i++) {

        float sumx2 = 0;
        for (int j = 0; j < QK_K; ++j) sumx2 += x[j]*x[j];
        float sigma2 = 2*sumx2/QK_K;

        for (int j = 0; j < QK_K/16; ++j) {
            if (quant_weights) {
                const float * qw = quant_weights + QK_K * i + 16*j;
                for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j+l]*x[16*j+l]);
            } else {
                for (int l = 0; l < 16; ++l) weight[l] = x[16*j+l]*x[16*j+l];
            }
            float sumw = 0;
            for (int l = 0; l < 16; ++l) sumw += weight[l];
            sw[j] = sumw;

            scales[j] = make_qx_quants(16, 4, x + 16*j, L + 16*j, 1, weight);
        }

        memset(y[i].scales, 0, 12);

        float d_block = make_qx_quants(QK_K/16, 32, scales, Ls, 1, sw);
        for (int j = 0; j < QK_K/16; ++j) {
            int l = Ls[j];
            if (j < 8) {
                y[i].scales[j] = l & 0xF;
            } else {
                y[i].scales[j-8] |= ((l & 0xF) << 4);
            }
            l >>= 4;
            y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
        }
        y[i].d = GGML_FP32_TO_FP16(d_block);

        int8_t sc;
        for (int j = 0; j < QK_K/16; ++j) {
            sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
            sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
            float d = GGML_FP16_TO_FP32(y[i].d) * sc;
            if (!d) {
                continue;
            }
            for (int ii = 0; ii < 16; ++ii) {
                int l = nearest_int(x[16*j + ii]/d);
                l = MAX(-4, MIN(3, l));
                L[16*j + ii] = l + 4;
            }
        }

        memset(y[i].hmask, 0, QK_K/8);
        // We put the high-bit for the 1st 8 quants into bit 0, the next 8 into bit 1, etc.
        int m = 0;
        uint8_t hm = 1;
        for (int j = 0; j < QK_K; ++j) {
            if (L[j] > 3) {
                y[i].hmask[m] |= hm;
                L[j] -= 4;
            }
            if (++m == QK_K/8) {
                m = 0; hm <<= 1;
            }
        }
        for (int j = 0; j < QK_K; j += 128) {
            for (int l = 0; l < 32; ++l) {
                y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
            }
        }

        x += QK_K;
    }
}
size_t quantize_q3_K(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
    size_t row_size = ggml_row_size(GGML_TYPE_Q3_K, n_per_row);
    if (!quant_weights) {
        quantize_row_q3_K_ref(src, dst, (int64_t)nrow*n_per_row);
    }
    else {
        char * qrow = (char *)dst;
        for (int64_t row = 0; row < nrow; ++row) {
            quantize_row_q3_K_impl(src, (block_q3_K*)qrow, n_per_row, quant_weights);
            src += n_per_row;
            qrow += row_size;
        }
    }
    return nrow * row_size;
}
  1058. // ====================== 4-bit (de)-quantization
  1059. void quantize_row_q4_K_ref(const float * restrict x, block_q4_K * restrict y, int64_t k) {
  1060. assert(k % QK_K == 0);
  1061. const int nb = k / QK_K;
  1062. uint8_t L[QK_K];
  1063. uint8_t Laux[32];
  1064. float weights[32];
  1065. float mins[QK_K/32];
  1066. float scales[QK_K/32];
  1067. for (int i = 0; i < nb; i++) {
  1068. float max_scale = 0; // as we are deducting the min, scales are always positive
  1069. float max_min = 0;
  1070. for (int j = 0; j < QK_K/32; ++j) {
  1071. //scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
  1072. float sum_x2 = 0;
  1073. for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
  1074. float av_x = sqrtf(sum_x2/32);
  1075. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  1076. scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false);
  1077. float scale = scales[j];
  1078. if (scale > max_scale) {
  1079. max_scale = scale;
  1080. }
  1081. float min = mins[j];
  1082. if (min > max_min) {
  1083. max_min = min;
  1084. }
  1085. }
  1086. float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
  1087. float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
  1088. for (int j = 0; j < QK_K/32; ++j) {
  1089. uint8_t ls = nearest_int(inv_scale*scales[j]);
  1090. uint8_t lm = nearest_int(inv_min*mins[j]);
  1091. ls = MIN(63, ls);
  1092. lm = MIN(63, lm);
  1093. if (j < 4) {
  1094. y[i].scales[j] = ls;
  1095. y[i].scales[j+4] = lm;
  1096. } else {
  1097. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  1098. y[i].scales[j-4] |= ((ls >> 4) << 6);
  1099. y[i].scales[j-0] |= ((lm >> 4) << 6);
  1100. }
  1101. }
  1102. y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
  1103. y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
  1104. uint8_t sc, m;
  1105. for (int j = 0; j < QK_K/32; ++j) {
  1106. get_scale_min_k4(j, y[i].scales, &sc, &m);
  1107. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1108. if (!d) continue;
  1109. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  1110. for (int ii = 0; ii < 32; ++ii) {
  1111. int l = nearest_int((x[32*j + ii] + dm)/d);
  1112. l = MAX(0, MIN(15, l));
  1113. L[32*j + ii] = l;
  1114. }
  1115. }
  1116. uint8_t * q = y[i].qs;
  1117. for (int j = 0; j < QK_K; j += 64) {
  1118. for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
  1119. q += 32;
  1120. }
  1121. x += QK_K;
  1122. }
  1123. }
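// Dequantization walks the sub-blocks two at a time: within each group of 32
// packed bytes the low nibbles belong to one sub-block and the high nibbles
// to the next, each with its own (scale, min) pair.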
  1124. void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int64_t k) {
  1125. assert(k % QK_K == 0);
  1126. const int nb = k / QK_K;
  1127. for (int i = 0; i < nb; i++) {
  1128. const uint8_t * q = x[i].qs;
  1129. const float d = GGML_FP16_TO_FP32(x[i].d);
  1130. const float min = GGML_FP16_TO_FP32(x[i].dmin);
  1131. int is = 0;
  1132. uint8_t sc, m;
  1133. for (int j = 0; j < QK_K; j += 64) {
  1134. get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
  1135. const float d1 = d * sc; const float m1 = min * m;
  1136. get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
  1137. const float d2 = d * sc; const float m2 = min * m;
  1138. for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1;
  1139. for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2;
  1140. q += 32; is += 2;
  1141. }
  1142. }
  1143. }
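// Importance-weighted Q4_K. The per-value weights are either quant_weights
// scaled by sqrt(sigma2 + x^2) or av_x + |x| as a fallback; the 8 sub-block
// scales and mins are then quantized to 6 bits with make_qp_quants using the
// summed sub-block weights sw[], instead of the simple 63/max rounding of the
// reference path.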
  1144. static void quantize_row_q4_K_impl(const float * restrict x, block_q4_K * restrict y, int64_t n_per_row, const float * quant_weights) {
  1145. assert(n_per_row % QK_K == 0);
  1146. const int64_t nb = n_per_row / QK_K;
  1147. uint8_t L[QK_K];
  1148. uint8_t Laux[32];
  1149. uint8_t Ls[QK_K/32];
  1150. uint8_t Lm[QK_K/32];
  1151. float weights[32];
  1152. float sw[QK_K/32];
  1153. float mins[QK_K/32];
  1154. float scales[QK_K/32];
  1155. for (int i = 0; i < nb; i++) {
  1156. float sum_x2 = 0;
  1157. for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l];
  1158. float sigma2 = 2*sum_x2/QK_K;
  1159. float av_x = sqrtf(sigma2);
  1160. for (int j = 0; j < QK_K/32; ++j) {
  1161. if (quant_weights) {
  1162. const float * qw = quant_weights + QK_K*i + 32*j;
  1163. for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]);
  1164. } else {
  1165. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  1166. }
  1167. float sumw = 0;
  1168. for (int l = 0; l < 32; ++l) sumw += weights[l];
  1169. sw[j] = sumw;
  1170. scales[j] = make_qkx3_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
  1171. }
  1172. float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw);
  1173. float m_block = make_qp_quants(QK_K/32, 63, mins, Lm, sw);
  1174. for (int j = 0; j < QK_K/32; ++j) {
  1175. uint8_t ls = Ls[j];
  1176. uint8_t lm = Lm[j];
  1177. if (j < 4) {
  1178. y[i].scales[j] = ls;
  1179. y[i].scales[j+4] = lm;
  1180. } else {
  1181. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  1182. y[i].scales[j-4] |= ((ls >> 4) << 6);
  1183. y[i].scales[j-0] |= ((lm >> 4) << 6);
  1184. }
  1185. }
  1186. y[i].d = GGML_FP32_TO_FP16(d_block);
  1187. y[i].dmin = GGML_FP32_TO_FP16(m_block);
  1188. uint8_t sc, m;
  1189. for (int j = 0; j < QK_K/32; ++j) {
  1190. get_scale_min_k4(j, y[i].scales, &sc, &m);
  1191. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1192. if (!d) continue;
  1193. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  1194. for (int ii = 0; ii < 32; ++ii) {
  1195. int l = nearest_int((x[32*j + ii] + dm)/d);
  1196. l = MAX(0, MIN(15, l));
  1197. L[32*j + ii] = l;
  1198. }
  1199. }
  1200. uint8_t * q = y[i].qs;
  1201. for (int j = 0; j < QK_K; j += 64) {
  1202. for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
  1203. q += 32;
  1204. }
  1205. x += QK_K;
  1206. }
  1207. }
  1208. size_t quantize_q4_K(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  1209. size_t row_size = ggml_row_size(GGML_TYPE_Q4_K, n_per_row);
  1210. if (!quant_weights) {
  1211. quantize_row_q4_K_ref(src, dst, (int64_t)nrow*n_per_row);
  1212. }
  1213. else {
  1214. char * qrow = (char *)dst;
  1215. for (int64_t row = 0; row < nrow; ++row) {
  1216. quantize_row_q4_K_impl(src, (block_q4_K*)qrow, n_per_row, quant_weights);
  1217. src += n_per_row;
  1218. qrow += row_size;
  1219. }
  1220. }
  1221. return nrow * row_size;
  1222. }
  1223. // ====================== 5-bit (de)-quantization
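// Q5_K reuses the Q4_K scale/min scheme (8 sub-blocks of 32 with 6-bit scales
// and mins plus fp16 d/dmin) but keeps 5 bits per weight: the low 4 bits go
// into qs[] as nibbles and the 5th bit of every value is collected in the
// 32-byte qh[] bitmask.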
  1224. void quantize_row_q5_K_ref(const float * restrict x, block_q5_K * restrict y, int64_t k) {
  1225. assert(k % QK_K == 0);
  1226. const int64_t nb = k / QK_K;
  1227. uint8_t L[QK_K];
  1228. float mins[QK_K/32];
  1229. float scales[QK_K/32];
  1230. float weights[32];
  1231. uint8_t Laux[32];
  1232. for (int i = 0; i < nb; i++) {
  1233. float max_scale = 0; // as we are deducting the min, scales are always positive
  1234. float max_min = 0;
  1235. for (int j = 0; j < QK_K/32; ++j) {
  1236. //scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
  1237. float sum_x2 = 0;
  1238. for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
  1239. float av_x = sqrtf(sum_x2/32);
  1240. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  1241. scales[j] = make_qkx2_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.5f, 0.1f, 15, false);
  1242. float scale = scales[j];
  1243. if (scale > max_scale) {
  1244. max_scale = scale;
  1245. }
  1246. float min = mins[j];
  1247. if (min > max_min) {
  1248. max_min = min;
  1249. }
  1250. }
  1251. float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
  1252. float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
  1253. for (int j = 0; j < QK_K/32; ++j) {
  1254. uint8_t ls = nearest_int(inv_scale*scales[j]);
  1255. uint8_t lm = nearest_int(inv_min*mins[j]);
  1256. ls = MIN(63, ls);
  1257. lm = MIN(63, lm);
  1258. if (j < 4) {
  1259. y[i].scales[j] = ls;
  1260. y[i].scales[j+4] = lm;
  1261. } else {
  1262. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  1263. y[i].scales[j-4] |= ((ls >> 4) << 6);
  1264. y[i].scales[j-0] |= ((lm >> 4) << 6);
  1265. }
  1266. }
  1267. y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
  1268. y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
  1269. uint8_t sc, m;
  1270. for (int j = 0; j < QK_K/32; ++j) {
  1271. get_scale_min_k4(j, y[i].scales, &sc, &m);
  1272. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1273. if (!d) continue;
  1274. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  1275. for (int ii = 0; ii < 32; ++ii) {
  1276. int l = nearest_int((x[32*j + ii] + dm)/d);
  1277. l = MAX(0, MIN(31, l));
  1278. L[32*j + ii] = l;
  1279. }
  1280. }
  1281. uint8_t * restrict qh = y[i].qh;
  1282. uint8_t * restrict ql = y[i].qs;
  1283. memset(qh, 0, QK_K/8);
  1284. uint8_t m1 = 1, m2 = 2;
  1285. for (int n = 0; n < QK_K; n += 64) {
  1286. for (int j = 0; j < 32; ++j) {
  1287. int l1 = L[n + j];
  1288. if (l1 > 15) {
  1289. l1 -= 16; qh[j] |= m1;
  1290. }
  1291. int l2 = L[n + j + 32];
  1292. if (l2 > 15) {
  1293. l2 -= 16; qh[j] |= m2;
  1294. }
  1295. ql[j] = l1 | (l2 << 4);
  1296. }
  1297. m1 <<= 2; m2 <<= 2;
  1298. ql += 32;
  1299. }
  1300. x += QK_K;
  1301. }
  1302. }
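// The masks u1/u2 select the 5th bit for the low- and high-nibble halves of
// each 64-value group; they advance by two bit positions per group, so all
// eight sub-blocks share the same 32 bytes of qh[].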
  1303. void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int64_t k) {
  1304. assert(k % QK_K == 0);
  1305. const int64_t nb = k / QK_K;
  1306. for (int i = 0; i < nb; i++) {
  1307. const uint8_t * ql = x[i].qs;
  1308. const uint8_t * qh = x[i].qh;
  1309. const float d = GGML_FP16_TO_FP32(x[i].d);
  1310. const float min = GGML_FP16_TO_FP32(x[i].dmin);
  1311. int is = 0;
  1312. uint8_t sc, m;
  1313. uint8_t u1 = 1, u2 = 2;
  1314. for (int j = 0; j < QK_K; j += 64) {
  1315. get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
  1316. const float d1 = d * sc; const float m1 = min * m;
  1317. get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
  1318. const float d2 = d * sc; const float m2 = min * m;
  1319. for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1;
  1320. for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2;
  1321. ql += 32; is += 2;
  1322. u1 <<= 2; u2 <<= 2;
  1323. }
  1324. }
  1325. }
  1326. static void quantize_row_q5_K_impl(const float * restrict x, block_q5_K * restrict y, int64_t n_per_row, const float * quant_weights) {
  1327. assert(n_per_row % QK_K == 0);
  1328. const int64_t nb = n_per_row / QK_K;
  1329. uint8_t L[QK_K];
  1330. uint8_t Laux[32];
  1331. uint8_t Ls[QK_K/32];
  1332. uint8_t Lm[QK_K/32];
  1333. float mins[QK_K/32];
  1334. float scales[QK_K/32];
  1335. float sw[QK_K/32];
  1336. float weights[32];
  1337. for (int i = 0; i < nb; i++) {
  1338. float sum_x2 = 0;
  1339. for (int l = 0; l < QK_K; ++l) sum_x2 += x[l] * x[l];
  1340. float sigma2 = 2*sum_x2/QK_K;
  1341. float av_x = sqrtf(sigma2);
  1342. for (int j = 0; j < QK_K/32; ++j) {
  1343. if (quant_weights) {
  1344. const float * qw = quant_weights + QK_K*i + 32*j;
  1345. for (int l = 0; l < 32; ++l) weights[l] = qw[l] * sqrtf(sigma2 + x[32*j + l]*x[32*j + l]);
  1346. } else {
  1347. for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
  1348. }
  1349. float sumw = 0;
  1350. for (int l = 0; l < 32; ++l) sumw += weights[l];
  1351. sw[j] = sumw;
  1352. scales[j] = make_qkx3_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.9f, 0.05f, 36, false);
  1353. }
  1354. float d_block = make_qp_quants(QK_K/32, 63, scales, Ls, sw);
  1355. float m_block = make_qp_quants(QK_K/32, 63, mins, Lm, sw);
  1356. for (int j = 0; j < QK_K/32; ++j) {
  1357. uint8_t ls = Ls[j];
  1358. uint8_t lm = Lm[j];
  1359. ls = MIN(63, ls);
  1360. lm = MIN(63, lm);
  1361. if (j < 4) {
  1362. y[i].scales[j] = ls;
  1363. y[i].scales[j+4] = lm;
  1364. } else {
  1365. y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
  1366. y[i].scales[j-4] |= ((ls >> 4) << 6);
  1367. y[i].scales[j-0] |= ((lm >> 4) << 6);
  1368. }
  1369. }
  1370. y[i].d = GGML_FP32_TO_FP16(d_block);
  1371. y[i].dmin = GGML_FP32_TO_FP16(m_block);
  1372. uint8_t sc, m;
  1373. for (int j = 0; j < QK_K/32; ++j) {
  1374. get_scale_min_k4(j, y[i].scales, &sc, &m);
  1375. const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
  1376. if (!d) continue;
  1377. const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
  1378. for (int ii = 0; ii < 32; ++ii) {
  1379. int l = nearest_int((x[32*j + ii] + dm)/d);
  1380. l = MAX(0, MIN(31, l));
  1381. L[32*j + ii] = l;
  1382. }
  1383. }
  1384. uint8_t * restrict qh = y[i].qh;
  1385. uint8_t * restrict ql = y[i].qs;
  1386. memset(qh, 0, QK_K/8);
  1387. uint8_t m1 = 1, m2 = 2;
  1388. for (int n = 0; n < QK_K; n += 64) {
  1389. for (int j = 0; j < 32; ++j) {
  1390. int l1 = L[n + j];
  1391. if (l1 > 15) {
  1392. l1 -= 16; qh[j] |= m1;
  1393. }
  1394. int l2 = L[n + j + 32];
  1395. if (l2 > 15) {
  1396. l2 -= 16; qh[j] |= m2;
  1397. }
  1398. ql[j] = l1 | (l2 << 4);
  1399. }
  1400. m1 <<= 2; m2 <<= 2;
  1401. ql += 32;
  1402. }
  1403. x += QK_K;
  1404. }
  1405. }
  1406. size_t quantize_q5_K(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  1407. size_t row_size = ggml_row_size(GGML_TYPE_Q5_K, n_per_row);
  1408. if (!quant_weights) {
  1409. quantize_row_q5_K_ref(src, dst, (int64_t)nrow*n_per_row);
  1410. }
  1411. else {
  1412. char * qrow = (char *)dst;
  1413. for (int64_t row = 0; row < nrow; ++row) {
  1414. quantize_row_q5_K_impl(src, (block_q5_K*)qrow, n_per_row, quant_weights);
  1415. src += n_per_row;
  1416. qrow += row_size;
  1417. }
  1418. }
  1419. return nrow * row_size;
  1420. }
  1421. // ====================== 6-bit (de)-quantization
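// Q6_K: 16 sub-blocks of 16 values, each with a signed 8-bit scale relative
// to the fp16 super-block scale d. The 6-bit values are split: low 4 bits in
// ql[] (two per byte) and the top 2 bits packed four per byte in qh[].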
  1422. void quantize_row_q6_K_ref(const float * restrict x, block_q6_K * restrict y, int64_t k) {
  1423. assert(k % QK_K == 0);
  1424. const int64_t nb = k / QK_K;
  1425. int8_t L[QK_K];
  1426. float scales[QK_K/16];
  1427. for (int i = 0; i < nb; i++) {
  1428. float max_scale = 0;
  1429. float max_abs_scale = 0;
  1430. for (int ib = 0; ib < QK_K/16; ++ib) {
  1431. const float scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL);
  1432. scales[ib] = scale;
  1433. const float abs_scale = fabsf(scale);
  1434. if (abs_scale > max_abs_scale) {
  1435. max_abs_scale = abs_scale;
  1436. max_scale = scale;
  1437. }
  1438. }
  1439. if (max_abs_scale < GROUP_MAX_EPS) {
  1440. memset(&y[i], 0, sizeof(block_q6_K));
  1441. y[i].d = GGML_FP32_TO_FP16(0.f);
  1442. x += QK_K;
  1443. continue;
  1444. }
  1445. float iscale = -128.f/max_scale;
  1446. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  1447. for (int ib = 0; ib < QK_K/16; ++ib) {
  1448. y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
  1449. }
  1450. for (int j = 0; j < QK_K/16; ++j) {
  1451. float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
  1452. if (!d) {
  1453. continue;
  1454. }
  1455. for (int ii = 0; ii < 16; ++ii) {
  1456. int l = nearest_int(x[16*j + ii]/d);
  1457. l = MAX(-32, MIN(31, l));
  1458. L[16*j + ii] = l + 32;
  1459. }
  1460. }
  1461. uint8_t * restrict ql = y[i].ql;
  1462. uint8_t * restrict qh = y[i].qh;
  1463. for (int j = 0; j < QK_K; j += 128) {
  1464. for (int l = 0; l < 32; ++l) {
  1465. const uint8_t q1 = L[j + l + 0] & 0xF;
  1466. const uint8_t q2 = L[j + l + 32] & 0xF;
  1467. const uint8_t q3 = L[j + l + 64] & 0xF;
  1468. const uint8_t q4 = L[j + l + 96] & 0xF;
  1469. ql[l+ 0] = q1 | (q3 << 4);
  1470. ql[l+32] = q2 | (q4 << 4);
  1471. qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
  1472. }
  1473. ql += 64;
  1474. qh += 32;
  1475. }
  1476. x += QK_K;
  1477. }
  1478. }
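// Each 128-value chunk is decoded from 64 ql bytes and 32 qh bytes; the -32
// offset restores the signed range and sc[] supplies the per-16 sub-block
// scale.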
  1479. void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int64_t k) {
  1480. assert(k % QK_K == 0);
  1481. const int64_t nb = k / QK_K;
  1482. for (int i = 0; i < nb; i++) {
  1483. const float d = GGML_FP16_TO_FP32(x[i].d);
  1484. const uint8_t * restrict ql = x[i].ql;
  1485. const uint8_t * restrict qh = x[i].qh;
  1486. const int8_t * restrict sc = x[i].scales;
  1487. for (int n = 0; n < QK_K; n += 128) {
  1488. for (int l = 0; l < 32; ++l) {
  1489. int is = l/16;
  1490. const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
  1491. const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
  1492. const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
  1493. const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
  1494. y[l + 0] = d * sc[is + 0] * q1;
  1495. y[l + 32] = d * sc[is + 2] * q2;
  1496. y[l + 64] = d * sc[is + 4] * q3;
  1497. y[l + 96] = d * sc[is + 6] * q4;
  1498. }
  1499. y += 128;
  1500. ql += 64;
  1501. qh += 32;
  1502. sc += 8;
  1503. }
  1504. }
  1505. }
  1506. static void quantize_row_q6_K_impl(const float * restrict x, block_q6_K * restrict y, int64_t n_per_row, const float * quant_weights) {
  1507. assert(n_per_row % QK_K == 0);
  1508. const int64_t nb = n_per_row / QK_K;
  1509. int8_t L[QK_K];
  1510. float scales[QK_K/16];
  1511. //float weights[16];
  1512. for (int i = 0; i < nb; i++) {
  1513. //float sum_x2 = 0;
  1514. //for (int j = 0; j < QK_K; ++j) sum_x2 += x[j]*x[j];
  1515. //float sigma2 = sum_x2/QK_K;
  1516. float max_scale = 0;
  1517. float max_abs_scale = 0;
  1518. for (int ib = 0; ib < QK_K/16; ++ib) {
  1519. float scale;
  1520. if (quant_weights) {
  1521. const float * qw = quant_weights + QK_K*i + 16*ib;
  1522. //for (int j = 0; j < 16; ++j) weights[j] = qw[j] * sqrtf(sigma2 + x[16*ib + j]*x[16*ib + j]);
  1523. //scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, weights);
  1524. scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, qw);
  1525. } else {
  1526. scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1, NULL);
  1527. }
  1528. scales[ib] = scale;
  1529. const float abs_scale = fabsf(scale);
  1530. if (abs_scale > max_abs_scale) {
  1531. max_abs_scale = abs_scale;
  1532. max_scale = scale;
  1533. }
  1534. }
  1535. if (max_abs_scale < GROUP_MAX_EPS) {
  1536. memset(&y[i], 0, sizeof(block_q6_K));
  1537. y[i].d = GGML_FP32_TO_FP16(0.f);
  1538. x += QK_K;
  1539. continue;
  1540. }
  1541. float iscale = -128.f/max_scale;
  1542. y[i].d = GGML_FP32_TO_FP16(1/iscale);
  1543. for (int ib = 0; ib < QK_K/16; ++ib) {
  1544. y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
  1545. }
  1546. for (int j = 0; j < QK_K/16; ++j) {
  1547. float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
  1548. if (!d) {
  1549. continue;
  1550. }
  1551. for (int ii = 0; ii < 16; ++ii) {
  1552. int l = nearest_int(x[16*j + ii]/d);
  1553. l = MAX(-32, MIN(31, l));
  1554. L[16*j + ii] = l + 32;
  1555. }
  1556. }
  1557. uint8_t * restrict ql = y[i].ql;
  1558. uint8_t * restrict qh = y[i].qh;
  1559. for (int j = 0; j < QK_K; j += 128) {
  1560. for (int l = 0; l < 32; ++l) {
  1561. const uint8_t q1 = L[j + l + 0] & 0xF;
  1562. const uint8_t q2 = L[j + l + 32] & 0xF;
  1563. const uint8_t q3 = L[j + l + 64] & 0xF;
  1564. const uint8_t q4 = L[j + l + 96] & 0xF;
  1565. ql[l+ 0] = q1 | (q3 << 4);
  1566. ql[l+32] = q2 | (q4 << 4);
  1567. qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
  1568. }
  1569. ql += 64;
  1570. qh += 32;
  1571. }
  1572. x += QK_K;
  1573. }
  1574. }
  1575. size_t quantize_q6_K(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  1576. size_t row_size = ggml_row_size(GGML_TYPE_Q6_K, n_per_row);
  1577. if (!quant_weights) {
  1578. quantize_row_q6_K_ref(src, dst, (int64_t)nrow*n_per_row);
  1579. }
  1580. else {
  1581. char * qrow = (char *)dst;
  1582. for (int64_t row = 0; row < nrow; ++row) {
  1583. quantize_row_q6_K_impl(src, (block_q6_K*)qrow, n_per_row, quant_weights);
  1584. src += n_per_row;
  1585. qrow += row_size;
  1586. }
  1587. }
  1588. return nrow * row_size;
  1589. }
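// Importance-weighted versions of the legacy block quants (Q4_0/Q4_1/Q5_0/
// Q5_1) follow. They share one pattern: compute sigma2 over the whole row,
// weight each value by qw * sqrt(sigma2 + x^2), and run make_qx_quants or
// make_qkx3_quants per 32-value block; without quant_weights they fall back
// to the corresponding _ref kernels.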
  1590. static void quantize_row_q4_0_impl(const float * restrict x, block_q4_0 * restrict y, int64_t n_per_row, const float * quant_weights) {
  1591. static_assert(QK4_0 == 32, "QK4_0 must be 32");
  1592. if (!quant_weights) {
  1593. quantize_row_q4_0_ref(x, y, n_per_row);
  1594. return;
  1595. }
  1596. float weight[QK4_0];
  1597. int8_t L[QK4_0];
  1598. float sum_x2 = 0;
  1599. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  1600. float sigma2 = sum_x2/n_per_row;
  1601. const int64_t nb = n_per_row/QK4_0;
  1602. for (int ib = 0; ib < nb; ++ib) {
  1603. const float * xb = x + QK4_0 * ib;
  1604. const float * qw = quant_weights + QK4_0 * ib;
  1605. for (int j = 0; j < QK4_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  1606. float d = make_qx_quants(QK4_0, 8, xb, L, 1, weight);
  1607. y[ib].d = GGML_FP32_TO_FP16(d);
  1608. for (int j = 0; j < 16; ++j) {
  1609. y[ib].qs[j] = L[j] | (L[j+16] << 4);
  1610. }
  1611. }
  1612. }
  1613. size_t quantize_q4_0(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  1614. if (!quant_weights) {
  1615. quantize_row_q4_0_ref(src, dst, (int64_t)nrow*n_per_row);
  1616. return nrow * ggml_row_size(GGML_TYPE_Q4_0, n_per_row);
  1617. }
  1618. size_t row_size = ggml_row_size(GGML_TYPE_Q4_0, n_per_row);
  1619. char * qrow = (char *)dst;
  1620. for (int64_t row = 0; row < nrow; ++row) {
  1621. quantize_row_q4_0_impl(src, (block_q4_0*)qrow, n_per_row, quant_weights);
  1622. src += n_per_row;
  1623. qrow += row_size;
  1624. }
  1625. return nrow * row_size;
  1626. }
  1627. static void quantize_row_q4_1_impl(const float * restrict x, block_q4_1 * restrict y, int64_t n_per_row, const float * quant_weights) {
  1628. static_assert(QK4_1 == 32, "QK4_1 must be 32");
  1629. if (!quant_weights) {
  1630. quantize_row_q4_1_ref(x, y, n_per_row);
  1631. return;
  1632. }
  1633. float weight[QK4_1];
  1634. uint8_t L[QK4_1], Laux[QK4_1];
  1635. float sum_x2 = 0;
  1636. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  1637. float sigma2 = sum_x2/n_per_row;
  1638. const int64_t nb = n_per_row/QK4_1;
  1639. for (int ib = 0; ib < nb; ++ib) {
  1640. const float * xb = x + QK4_1 * ib;
  1641. const float * qw = quant_weights + QK4_1 * ib;
  1642. for (int j = 0; j < QK4_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  1643. float min;
  1644. float d = make_qkx3_quants(QK4_1, 15, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false);
  1645. y[ib].d = GGML_FP32_TO_FP16(d);
  1646. y[ib].m = GGML_FP32_TO_FP16(-min);
  1647. for (int j = 0; j < 16; ++j) {
  1648. y[ib].qs[j] = L[j] | (L[j+16] << 4);
  1649. }
  1650. }
  1651. }
  1652. size_t quantize_q4_1(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  1653. if (!quant_weights) {
  1654. quantize_row_q4_1_ref(src, dst, (int64_t)nrow*n_per_row);
  1655. return nrow * ggml_row_size(GGML_TYPE_Q4_1, n_per_row);
  1656. }
  1657. size_t row_size = ggml_row_size(GGML_TYPE_Q4_1, n_per_row);
  1658. char * qrow = (char *)dst;
  1659. for (int64_t row = 0; row < nrow; ++row) {
  1660. quantize_row_q4_1_impl(src, (block_q4_1*)qrow, n_per_row, quant_weights);
  1661. src += n_per_row;
  1662. qrow += row_size;
  1663. }
  1664. return nrow * row_size;
  1665. }
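// Q5_0 packs the 5th bits of all 32 values of a block into a single 32-bit
// qh word: bit j holds the high bit of value j and bit j+16 that of value
// j+16, mirroring the nibble layout of qs[].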
  1666. static void quantize_row_q5_0_impl(const float * restrict x, block_q5_0 * restrict y, int64_t n_per_row, const float * quant_weights) {
  1667. static_assert(QK5_0 == 32, "QK5_0 must be 32");
  1668. if (!quant_weights) {
  1669. quantize_row_q5_0_ref(x, y, n_per_row);
  1670. return;
  1671. }
  1672. float weight[QK5_0];
  1673. int8_t L[QK5_0];
  1674. float sum_x2 = 0;
  1675. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  1676. float sigma2 = sum_x2/n_per_row;
  1677. const int64_t nb = n_per_row/QK5_0;
  1678. for (int ib = 0; ib < nb; ++ib) {
  1679. const float * xb = x + QK5_0 * ib;
  1680. const float * qw = quant_weights + QK5_0 * ib;
  1681. for (int j = 0; j < QK5_0; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  1682. float d = make_qx_quants(QK5_0, 16, xb, L, 1, weight);
  1683. y[ib].d = GGML_FP32_TO_FP16(d);
  1684. uint32_t qh = 0;
  1685. for (int j = 0; j < 16; ++j) {
  1686. const uint8_t xi0 = L[j];
  1687. const uint8_t xi1 = L[j+16];
  1688. y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
  1689. // get the 5-th bit and store it in qh at the right position
  1690. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
  1691. qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2);
  1692. }
  1693. memcpy(&y[ib].qh, &qh, sizeof(qh));
  1694. }
  1695. }
  1696. size_t quantize_q5_0(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  1697. if (!quant_weights) {
  1698. quantize_row_q5_0_ref(src, dst, (int64_t)nrow*n_per_row);
  1699. return nrow * ggml_row_size(GGML_TYPE_Q5_0, n_per_row);
  1700. }
  1701. size_t row_size = ggml_row_size(GGML_TYPE_Q5_0, n_per_row);
  1702. char * qrow = (char *)dst;
  1703. for (int64_t row = 0; row < nrow; ++row) {
  1704. quantize_row_q5_0_impl(src, (block_q5_0*)qrow, n_per_row, quant_weights);
  1705. src += n_per_row;
  1706. qrow += row_size;
  1707. }
  1708. return nrow * row_size;
  1709. }
  1710. static void quantize_row_q5_1_impl(const float * restrict x, block_q5_1 * restrict y, int64_t n_per_row, const float * quant_weights) {
  1711. static_assert(QK5_1 == 32, "QK5_1 must be 32");
  1712. if (!quant_weights) {
  1713. quantize_row_q5_1_ref(x, y, n_per_row);
  1714. return;
  1715. }
  1716. float weight[QK5_1];
  1717. uint8_t L[QK5_1], Laux[QK5_1];
  1718. float sum_x2 = 0;
  1719. for (int j = 0; j < n_per_row; ++j) sum_x2 += x[j]*x[j];
  1720. float sigma2 = sum_x2/n_per_row;
  1721. const int64_t nb = n_per_row/QK5_1;
  1722. for (int ib = 0; ib < nb; ++ib) {
  1723. const float * xb = x + QK5_1 * ib;
  1724. const float * qw = quant_weights + QK5_1 * ib;
  1725. for (int j = 0; j < QK5_1; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  1726. float min;
  1727. float d = make_qkx3_quants(QK5_1, 31, xb, weight, L, &min, Laux, -0.9f, 0.05f, 36, false);
  1728. y[ib].d = GGML_FP32_TO_FP16(d);
  1729. y[ib].m = GGML_FP32_TO_FP16(-min);
  1730. uint32_t qh = 0;
  1731. for (int j = 0; j < 16; ++j) {
  1732. const uint8_t xi0 = L[j];
  1733. const uint8_t xi1 = L[j+16];
  1734. y[ib].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
  1735. // get the 5-th bit and store it in qh at the right position
  1736. qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
1737. qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2);
  1738. }
  1739. memcpy(&y[ib].qh, &qh, sizeof(qh));
  1740. }
  1741. }
  1742. size_t quantize_q5_1(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  1743. if (!quant_weights) {
  1744. quantize_row_q5_1_ref(src, dst, (int64_t)nrow*n_per_row);
  1745. return nrow * ggml_row_size(GGML_TYPE_Q5_1, n_per_row);
  1746. }
  1747. size_t row_size = ggml_row_size(GGML_TYPE_Q5_1, n_per_row);
  1748. char * qrow = (char *)dst;
  1749. for (int64_t row = 0; row < nrow; ++row) {
  1750. quantize_row_q5_1_impl(src, (block_q5_1*)qrow, n_per_row, quant_weights);
  1751. src += n_per_row;
  1752. qrow += row_size;
  1753. }
  1754. return nrow * row_size;
  1755. }
  1756. size_t quantize_q8_0(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  1757. (void)quant_weights; // not used
  1758. const size_t row_size = ggml_row_size(GGML_TYPE_Q8_0, n_per_row);
  1759. quantize_row_q8_0_ref(src, dst, (int64_t)nrow*n_per_row);
  1760. return nrow * row_size;
  1761. }
  1762. // ====================== Ternary (de)-quantization (BitNet b1.58 and TriLMs)
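// TQ1_0 stores ternary weights (-1, 0, +1) at 1.6875 bpw: five trits are
// packed into one byte in base 3 (3^5 = 243 states) and the byte is then
// rescaled by 256/243 (rounding up) so the decoder can extract trits with a
// multiply and a shift (see dequantize_row_tq1_0). The remaining values of a
// block go into qh[], four trits per byte, and the block scale d is simply
// the absolute maximum.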
  1763. void quantize_row_tq1_0_ref(const float * restrict x, block_tq1_0 * restrict y, int64_t k) {
  1764. assert(k % QK_K == 0);
  1765. const int64_t nb = k / QK_K;
  1766. for (int64_t i = 0; i < nb; i++) {
  1767. float amax = 0.0f; // absolute max
  1768. for (int j = 0; j < QK_K; j++) {
  1769. const float v = x[j];
  1770. amax = MAX(amax, fabsf(v));
  1771. }
  1772. const float d = amax;
  1773. const float id = d ? 1.0f/d : 0.0f;
  1774. y[i].d = GGML_FP32_TO_FP16(d);
  1775. // 5 elements per byte, along 32 bytes
  1776. for (size_t j = 0; j < sizeof(y->qs) - sizeof(y->qs) % 32; j += 32) {
  1777. for (size_t m = 0; m < 32; ++m) {
  1778. uint8_t q = 0;
  1779. for (size_t n = 0; n < 5; ++n) {
  1780. int xi = lroundf(x[m + n*32] * id) + 1; // -1, 0, 1 -> 0, 1, 2
  1781. q *= 3;
  1782. q += xi;
  1783. }
  1784. // ceiling division (243 == pow(3, 5))
  1785. q = ((uint16_t)q * 256 + (243 - 1)) / 243;
  1786. y[i].qs[j + m] = q;
  1787. }
  1788. x += 5*32;
  1789. }
  1790. // along 16 bytes
  1791. for (size_t j = sizeof(y->qs) - sizeof(y->qs) % 32; j < sizeof(y->qs); j += 16) {
  1792. for (size_t m = 0; m < 16; ++m) {
  1793. uint8_t q = 0;
  1794. for (size_t n = 0; n < 5; ++n) {
  1795. int xi = lroundf(x[m + n*16] * id) + 1; // -1, 0, 1 -> 0, 1, 2
  1796. q *= 3;
  1797. q += xi;
  1798. }
  1799. // ceiling division (243 == pow(3, 5))
  1800. q = ((uint16_t)q * 256 + (243 - 1)) / 243;
  1801. y[i].qs[j + m] = q;
  1802. }
  1803. x += 5*16;
  1804. }
  1805. // 4 elements per byte
  1806. for (size_t j = 0; j < sizeof(y->qh); ++j) {
  1807. uint8_t q = 0;
  1808. for (size_t m = 0; m < 4; ++m) {
  1809. // -1, 0, 1 -> 0, 1, 2
  1810. int xi = lroundf(x[j + m*sizeof(y->qh)] * id) + 1;
  1811. q *= 3;
  1812. q += xi;
  1813. }
  1814. // shift the first value to the most significant trit
  1815. q *= 3;
  1816. // ceiling division (243 == pow(3, 5))
  1817. q = ((uint16_t)q * 256 + (243 - 1)) / 243;
  1818. y[i].qh[j] = q;
  1819. }
  1820. x += 4*sizeof(y->qh);
  1821. }
  1822. }
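// TQ2_0 is the simpler ternary layout: each value is stored as a plain 2-bit
// code (0, 1, 2 for -1, 0, +1), four per byte, trading a slightly larger
// block size for cheaper decoding than TQ1_0.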
  1823. void quantize_row_tq2_0_ref(const float * restrict x, block_tq2_0 * restrict y, int64_t k) {
  1824. assert(k % QK_K == 0);
  1825. const int64_t nb = k / QK_K;
  1826. for (int64_t i = 0; i < nb; i++) {
  1827. float amax = 0.0f; // absolute max
  1828. for (int j = 0; j < QK_K; j++) {
  1829. const float v = x[j];
  1830. amax = MAX(amax, fabsf(v));
  1831. }
  1832. const float d = amax;
  1833. const float id = d ? 1.0f/d : 0.0f;
  1834. y[i].d = GGML_FP32_TO_FP16(d);
  1835. for (size_t j = 0; j < sizeof(y->qs); j += 32) {
  1836. for (size_t m = 0; m < 32; ++m) {
  1837. uint8_t q = 0;
  1838. for (size_t n = 0; n < 4; ++n) {
  1839. // -1, 0, 1 -> 0, 1, 2
  1840. int xi = lroundf(x[m + n*32] * id) + 1;
  1841. q += (xi & 3) << (2*n);
  1842. }
  1843. y[i].qs[j + m] = q;
  1844. }
  1845. x += 4*32;
  1846. }
  1847. }
  1848. }
  1849. size_t quantize_tq1_0(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  1850. (void)quant_weights; // not used
  1851. const size_t row_size = ggml_row_size(GGML_TYPE_TQ1_0, n_per_row);
  1852. quantize_row_tq1_0_ref(src, dst, (int64_t)nrow*n_per_row);
  1853. return nrow * row_size;
  1854. }
  1855. size_t quantize_tq2_0(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  1856. (void)quant_weights; // not used
  1857. const size_t row_size = ggml_row_size(GGML_TYPE_TQ2_0, n_per_row);
  1858. quantize_row_tq2_0_ref(src, dst, (int64_t)nrow*n_per_row);
  1859. return nrow * row_size;
  1860. }
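// Trit extraction: thanks to the encoder's ceiling rescaling by 256/243,
// multiplying a packed byte by 3^n (truncated to uint8_t, i.e. mod 256) moves
// trit n into the top bits, so ((uint16_t)q * 3) >> 8 recovers it as 0, 1 or 2
// without any division.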
  1861. void dequantize_row_tq1_0(const block_tq1_0 * restrict x, float * restrict y, int64_t k) {
  1862. assert(k % QK_K == 0);
  1863. const int64_t nb = k / QK_K;
  1864. const uint8_t pow3[6] = {1, 3, 9, 27, 81, 243};
  1865. for (int64_t i = 0; i < nb; ++i) {
  1866. const float d = GGML_FP16_TO_FP32(x[i].d);
  1867. for (size_t j = 0; j < sizeof(x->qs) - sizeof(x->qs) % 32; j += 32) {
  1868. for (size_t n = 0; n < 5; ++n) {
  1869. for (size_t m = 0; m < 32; ++m) {
  1870. uint8_t q = x[i].qs[j + m] * pow3[n];
  1871. int16_t xi = ((uint16_t) q * 3) >> 8;
  1872. *y++ = (float) (xi - 1) * d;
  1873. }
  1874. }
  1875. }
  1876. for (size_t j = sizeof(x->qs) - sizeof(x->qs) % 32; j < sizeof(x->qs); j += 16) {
  1877. for (size_t n = 0; n < 5; ++n) {
  1878. for (size_t m = 0; m < 16; ++m) {
  1879. uint8_t q = x[i].qs[j + m] * pow3[n];
  1880. int16_t xi = ((uint16_t) q * 3) >> 8;
  1881. *y++ = (float) (xi - 1) * d;
  1882. }
  1883. }
  1884. }
  1885. for (size_t n = 0; n < 4; ++n) {
  1886. for (size_t j = 0; j < sizeof(x->qh); ++j) {
  1887. uint8_t q = x[i].qh[j] * pow3[n];
  1888. int16_t xi = ((uint16_t) q * 3) >> 8;
  1889. *y++ = (float) (xi - 1) * d;
  1890. }
  1891. }
  1892. }
  1893. }
  1894. void dequantize_row_tq2_0(const block_tq2_0 * restrict x, float * restrict y, int64_t k) {
  1895. assert(k % QK_K == 0);
  1896. const int64_t nb = k / QK_K;
  1897. for (int64_t i = 0; i < nb; ++i) {
  1898. const float d = GGML_FP16_TO_FP32(x[i].d);
  1899. for (size_t j = 0; j < sizeof(x->qs); j += 32) {
  1900. for (size_t l = 0; l < 4; ++l) {
  1901. for (size_t m = 0; m < 32; ++m) {
  1902. int8_t q = (x[i].qs[j + m] >> (l*2)) & 3;
  1903. *y++ = (float) (q - 1) * d;
  1904. }
  1905. }
  1906. }
  1907. }
  1908. }
  1909. // ====================== "True" 2-bit (de)-quantization
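// IQ2_XXS: each 32-value sub-block is encoded as two 32-bit words holding
// four 8-bit indices into the iq2xxs_grid codebook (8 values per entry),
// four 7-bit sign patterns looked up in ksigns_iq2xs, and a 4-bit sub-block
// scale in the top bits.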
  1910. void dequantize_row_iq2_xxs(const block_iq2_xxs * restrict x, float * restrict y, int64_t k) {
  1911. assert(k % QK_K == 0);
  1912. const int64_t nb = k / QK_K;
  1913. uint32_t aux32[2];
  1914. const uint8_t * aux8 = (const uint8_t *)aux32;
  1915. for (int i = 0; i < nb; i++) {
  1916. const float d = GGML_FP16_TO_FP32(x[i].d);
  1917. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  1918. memcpy(aux32, x[i].qs + 4*ib32, 2*sizeof(uint32_t));
  1919. const float db = d * (0.5f + (aux32[1] >> 28)) * 0.25f;
  1920. for (int l = 0; l < 4; ++l) {
  1921. const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
  1922. const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
  1923. for (int j = 0; j < 8; ++j) {
  1924. y[j] = db * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
  1925. }
  1926. y += 8;
  1927. }
  1928. }
  1929. }
  1930. }
  1931. // ====================== 2.3125 bpw (de)-quantization
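// IQ2_XS widens the codebook to 512 entries: each 16-bit element of qs[]
// carries a 9-bit grid index plus a 7-bit sign pattern, and scales[] holds a
// 4-bit scale per 16 values.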
  1932. void dequantize_row_iq2_xs(const block_iq2_xs * restrict x, float * restrict y, int64_t k) {
  1933. assert(k % QK_K == 0);
  1934. const int64_t nb = k / QK_K;
  1935. float db[2];
  1936. for (int i = 0; i < nb; i++) {
  1937. const float d = GGML_FP16_TO_FP32(x[i].d);
  1938. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  1939. db[0] = d * (0.5f + (x[i].scales[ib32] & 0xf)) * 0.25f;
  1940. db[1] = d * (0.5f + (x[i].scales[ib32] >> 4)) * 0.25f;
  1941. for (int l = 0; l < 4; ++l) {
  1942. const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (x[i].qs[4*ib32 + l] & 511));
  1943. const uint8_t signs = ksigns_iq2xs[x[i].qs[4*ib32 + l] >> 9];
  1944. for (int j = 0; j < 8; ++j) {
  1945. y[j] = db[l/2] * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
  1946. }
  1947. y += 8;
  1948. }
  1949. }
  1950. }
  1951. }
  1952. // ====================== 2.5625 bpw (de)-quantization
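// IQ2_S stores 8-bit grid indices in qs[] with two extra high bits per index
// taken from qh[], followed by explicit per-8 sign bytes; scales are 4 bits
// per 16 values as in IQ2_XS.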
  1953. void dequantize_row_iq2_s(const block_iq2_s * restrict x, float * restrict y, int64_t k) {
  1954. assert(k % QK_K == 0);
  1955. const int64_t nb = k / QK_K;
  1956. float db[2];
  1957. for (int i = 0; i < nb; i++) {
  1958. const float d = GGML_FP16_TO_FP32(x[i].d);
  1959. const uint8_t * qs = x[i].qs;
  1960. const uint8_t * qh = x[i].qh;
  1961. const uint8_t * signs = qs + QK_K/8;
  1962. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  1963. db[0] = d * (0.5f + (x[i].scales[ib32] & 0xf)) * 0.25f;
  1964. db[1] = d * (0.5f + (x[i].scales[ib32] >> 4)) * 0.25f;
  1965. for (int l = 0; l < 4; ++l) {
  1966. const float dl = db[l/2];
  1967. const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
  1968. for (int j = 0; j < 8; ++j) {
  1969. y[j] = dl * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1.f : 1.f);
  1970. }
  1971. y += 8;
  1972. }
  1973. qs += 4;
  1974. signs += 4;
  1975. }
  1976. }
  1977. }
  1978. // ====================== 3.0625 bpw (de)-quantization
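// IQ3_XXS: pairs of 8-bit indices into iq3xxs_grid (4 values per entry) give
// each group of 8, while a uint32 per 32 values packs the four 7-bit sign
// patterns and the 4-bit sub-block scale, as in IQ2_XXS.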
  1979. void dequantize_row_iq3_xxs(const block_iq3_xxs * restrict x, float * restrict y, int64_t k) {
  1980. assert(k % QK_K == 0);
  1981. const int64_t nb = k / QK_K;
  1982. uint32_t aux32;
  1983. for (int i = 0; i < nb; i++) {
  1984. const float d = GGML_FP16_TO_FP32(x[i].d);
  1985. const uint8_t * qs = x[i].qs;
  1986. const uint8_t * scales_and_signs = qs + QK_K/4;
  1987. for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
  1988. memcpy(&aux32, scales_and_signs + 4*ib32, sizeof(uint32_t));
  1989. const float db = d * (0.5f + (aux32 >> 28)) * 0.5f;
  1990. for (int l = 0; l < 4; ++l) {
  1991. const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
  1992. const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + qs[2*l+0]);
  1993. const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + qs[2*l+1]);
  1994. for (int j = 0; j < 4; ++j) {
  1995. y[j+0] = db * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f);
  1996. y[j+4] = db * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f);
  1997. }
  1998. y += 8;
  1999. }
  2000. qs += 8;
  2001. }
  2002. }
  2003. }
  2004. // ====================== 3.3125 bpw (de)-quantization
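// IQ3_S extends the grid index to 9 bits using qh[] and stores the sign bits
// explicitly in signs[]; a 4-bit scale per 32 values is applied as
// d * (1 + 2*s).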
  2005. void dequantize_row_iq3_s(const block_iq3_s * restrict x, float * restrict y, int64_t k) {
  2006. assert(k % QK_K == 0);
  2007. const int64_t nb = k / QK_K;
  2008. for (int i = 0; i < nb; i++) {
  2009. const float d = GGML_FP16_TO_FP32(x[i].d);
  2010. const uint8_t * qs = x[i].qs;
  2011. const uint8_t * qh = x[i].qh;
  2012. const uint8_t * signs = x[i].signs;
  2013. for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
  2014. const float db1 = d * (1 + 2*(x[i].scales[ib32/2] & 0xf));
  2015. const float db2 = d * (1 + 2*(x[i].scales[ib32/2] >> 4));
  2016. for (int l = 0; l < 4; ++l) {
  2017. const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[0] << (8-2*l)) & 256)));
  2018. const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[0] << (7-2*l)) & 256)));
  2019. for (int j = 0; j < 4; ++j) {
  2020. y[j+0] = db1 * grid1[j] * (signs[l] & kmask_iq2xs[j+0] ? -1.f : 1.f);
  2021. y[j+4] = db1 * grid2[j] * (signs[l] & kmask_iq2xs[j+4] ? -1.f : 1.f);
  2022. }
  2023. y += 8;
  2024. }
  2025. qs += 8;
  2026. signs += 4;
  2027. for (int l = 0; l < 4; ++l) {
  2028. const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[1] << (8-2*l)) & 256)));
  2029. const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[1] << (7-2*l)) & 256)));
  2030. for (int j = 0; j < 4; ++j) {
  2031. y[j+0] = db2 * grid1[j] * (signs[l] & kmask_iq2xs[j+0] ? -1.f : 1.f);
  2032. y[j+4] = db2 * grid2[j] * (signs[l] & kmask_iq2xs[j+4] ? -1.f : 1.f);
  2033. }
  2034. y += 8;
  2035. }
  2036. qh += 2;
  2037. qs += 8;
  2038. signs += 4;
  2039. }
  2040. }
  2041. }
  2042. // ====================== 1.5625 bpw (de)-quantization
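// IQ1_S: each group of 8 values comes from the iq1s_grid codebook, indexed by
// 8 bits from qs[] plus 3 high bits from qh[]. qh also carries a 3-bit scale
// per 32 values and a sign bit that selects +/-IQ1S_DELTA as a common offset
// for the whole group of 32.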
  2043. void dequantize_row_iq1_s(const block_iq1_s * restrict x, float * restrict y, int64_t k) {
  2044. assert(k % QK_K == 0);
  2045. const int64_t nb = k / QK_K;
  2046. for (int i = 0; i < nb; i++) {
  2047. const float d = GGML_FP16_TO_FP32(x[i].d);
  2048. const uint8_t * qs = x[i].qs;
  2049. const uint16_t * qh = x[i].qh;
  2050. for (int ib = 0; ib < QK_K/32; ++ib) {
  2051. const float dl = d * (2*((qh[ib] >> 12) & 7) + 1);
  2052. const float delta = qh[ib] & 0x8000 ? -IQ1S_DELTA : IQ1S_DELTA;
  2053. for (int l = 0; l < 4; ++l) {
  2054. const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8)));
  2055. for (int j = 0; j < 8; ++j) {
  2056. y[j] = dl * (grid[j] + delta);
  2057. }
  2058. y += 8;
  2059. }
  2060. qs += 4;
  2061. }
  2062. }
  2063. }
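// IQ1_M differs from IQ1_S in that the fp16 block scale is itself scattered
// in 4-bit pieces across scales[] (reassembled via iq1m_scale_t) and each
// group of 8 values gets its own +/-IQ1S_DELTA sign from qh[].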
  2064. void dequantize_row_iq1_m(const block_iq1_m * restrict x, float * restrict y, int64_t k) {
  2065. assert(k % QK_K == 0);
  2066. const int64_t nb = k / QK_K;
  2067. float delta[4];
  2068. uint16_t idx[4];
  2069. iq1m_scale_t scale;
  2070. for (int i = 0; i < nb; i++) {
  2071. const uint16_t * sc = (const uint16_t *)x[i].scales;
  2072. scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
  2073. const float d = GGML_FP16_TO_FP32(scale.f16);
  2074. const uint8_t * qs = x[i].qs;
  2075. const uint8_t * qh = x[i].qh;
  2076. for (int ib = 0; ib < QK_K/32; ++ib) {
  2077. const float dl1 = d * (2*((sc[ib/2] >> (6*(ib%2)+0)) & 0x7) + 1);
  2078. const float dl2 = d * (2*((sc[ib/2] >> (6*(ib%2)+3)) & 0x7) + 1);
  2079. idx[0] = qs[0] | ((qh[0] << 8) & 0x700);
  2080. idx[1] = qs[1] | ((qh[0] << 4) & 0x700);
  2081. idx[2] = qs[2] | ((qh[1] << 8) & 0x700);
  2082. idx[3] = qs[3] | ((qh[1] << 4) & 0x700);
  2083. delta[0] = qh[0] & 0x08 ? -IQ1S_DELTA : IQ1S_DELTA;
  2084. delta[1] = qh[0] & 0x80 ? -IQ1S_DELTA : IQ1S_DELTA;
  2085. delta[2] = qh[1] & 0x08 ? -IQ1S_DELTA : IQ1S_DELTA;
  2086. delta[3] = qh[1] & 0x80 ? -IQ1S_DELTA : IQ1S_DELTA;
  2087. for (int l = 0; l < 2; ++l) {
  2088. const int8_t * grid = (const int8_t *)(iq1s_grid + idx[l]);
  2089. for (int j = 0; j < 8; ++j) {
  2090. y[j] = dl1 * (grid[j] + delta[l]);
  2091. }
  2092. y += 8;
  2093. }
  2094. for (int l = 2; l < 4; ++l) {
  2095. const int8_t * grid = (const int8_t *)(iq1s_grid + idx[l]);
  2096. for (int j = 0; j < 8; ++j) {
  2097. y[j] = dl2 * (grid[j] + delta[l]);
  2098. }
  2099. y += 8;
  2100. }
  2101. qs += 4;
  2102. qh += 2;
  2103. }
  2104. }
  2105. }
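// Non-linear 4-bit codebook shared by IQ4_NL and IQ4_XS: the 16 entries are
// spaced more densely around zero, so a nibble is decoded by a table lookup
// scaled by the block scale rather than by a linear mapping.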
  2106. static const int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113};
  2107. void dequantize_row_iq4_nl(const block_iq4_nl * restrict x, float * restrict y, int64_t k) {
  2108. assert(k % QK4_NL == 0);
  2109. const int64_t nb = k / QK4_NL;
  2110. for (int i = 0; i < nb; i++) {
  2111. const uint8_t * qs = x[i].qs;
  2112. const float d = GGML_FP16_TO_FP32(x[i].d);
  2113. for (int j = 0; j < QK4_NL/2; ++j) {
  2114. y[j+ 0] = d * kvalues_iq4nl[qs[j] & 0xf];
  2115. y[j+QK4_NL/2] = d * kvalues_iq4nl[qs[j] >> 4];
  2116. }
  2117. y += QK4_NL;
  2118. qs += QK4_NL/2;
  2119. }
  2120. }
  2121. void dequantize_row_iq4_xs(const block_iq4_xs * restrict x, float * restrict y, int64_t k) {
  2122. assert(k % QK_K == 0);
  2123. const int64_t nb = k / QK_K;
  2124. for (int i = 0; i < nb; i++) {
  2125. const uint8_t * qs = x[i].qs;
  2126. const float d = GGML_FP16_TO_FP32(x[i].d);
  2127. for (int ib = 0; ib < QK_K/32; ++ib) {
  2128. const int ls = ((x[i].scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((x[i].scales_h >> 2*ib) & 3) << 4);
  2129. const float dl = d * (ls - 32);
  2130. for (int j = 0; j < 16; ++j) {
  2131. y[j+ 0] = dl * kvalues_iq4nl[qs[j] & 0xf];
  2132. y[j+16] = dl * kvalues_iq4nl[qs[j] >> 4];
  2133. }
  2134. y += 32;
  2135. qs += 16;
  2136. }
  2137. }
  2138. }
  2139. //===================================== Q8_K ==============================================
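// Q8_K is the 8-bit format used for activations when computing dot products
// with the k-quants: one float scale per 256-value block plus per-16 sums
// (bsums) that several vec_dot kernels use to fold in the block minimums.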
  2140. void quantize_row_q8_K_ref(const float * restrict x, block_q8_K * restrict y, int64_t k) {
  2141. assert(k % QK_K == 0);
  2142. const int64_t nb = k / QK_K;
  2143. for (int i = 0; i < nb; i++) {
  2144. float max = 0;
  2145. float amax = 0;
  2146. for (int j = 0; j < QK_K; ++j) {
  2147. float ax = fabsf(x[j]);
  2148. if (ax > amax) {
  2149. amax = ax; max = x[j];
  2150. }
  2151. }
  2152. if (!amax) {
  2153. y[i].d = 0;
  2154. memset(y[i].qs, 0, QK_K);
  2155. x += QK_K;
  2156. continue;
  2157. }
  2158. //const float iscale = -128.f/max;
  2159. // We need this change for IQ2_XXS, else the AVX implementation becomes very awkward
  2160. const float iscale = -127.f/max;
  2161. for (int j = 0; j < QK_K; ++j) {
  2162. int v = nearest_int(iscale*x[j]);
  2163. y[i].qs[j] = MIN(127, v);
  2164. }
  2165. for (int j = 0; j < QK_K/16; ++j) {
  2166. int sum = 0;
  2167. for (int ii = 0; ii < 16; ++ii) {
  2168. sum += y[i].qs[j*16 + ii];
  2169. }
  2170. y[i].bsums[j] = sum;
  2171. }
  2172. y[i].d = 1/iscale;
  2173. x += QK_K;
  2174. }
  2175. }
  2176. void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int64_t k) {
  2177. assert(k % QK_K == 0);
  2178. const int64_t nb = k / QK_K;
  2179. for (int i = 0; i < nb; i++) {
  2180. for (int j = 0; j < QK_K; ++j) {
  2181. *y++ = x[i].d * x[i].qs[j];
  2182. }
  2183. }
  2184. }
  2185. // ================================ IQ2 quantization =============================================
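// Codebook bookkeeping for the IQ2/IQ1 quantizers: for each type we keep the
// grid of representable points, a map from a packed point to its grid index
// (-1 when the point is not on the grid), and a neighbour list used to find
// the closest grid points during quantization. iq2xs_init_impl fills these
// tables once per type.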
  2186. typedef struct {
  2187. uint64_t * grid;
  2188. int * map;
  2189. uint16_t * neighbours;
  2190. } iq2_entry_t;
  2191. static iq2_entry_t iq2_data[4] = {
  2192. {NULL, NULL, NULL},
  2193. {NULL, NULL, NULL},
  2194. {NULL, NULL, NULL},
  2195. {NULL, NULL, NULL},
  2196. };
  2197. static inline int iq2_data_index(enum ggml_type type) {
  2198. GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M || type == GGML_TYPE_IQ2_S);
  2199. return type == GGML_TYPE_IQ2_XXS ? 0 :
  2200. type == GGML_TYPE_IQ2_XS ? 1 :
  2201. type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M ? 2 : 3;
  2202. }
  2203. static inline int iq2_grid_size(enum ggml_type type) {
  2204. GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M || type == GGML_TYPE_IQ2_S);
  2205. return type == GGML_TYPE_IQ2_XXS ? 256 :
  2206. type == GGML_TYPE_IQ2_XS ? 512 :
  2207. type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M ? NGRID_IQ1S : 1024;
  2208. }
  2209. static int iq2_compare_func(const void * left, const void * right) {
  2210. const int * l = (const int *)left;
  2211. const int * r = (const int *)right;
  2212. return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0;
  2213. }
  2214. void iq2xs_init_impl(enum ggml_type type) {
  2215. const int gindex = iq2_data_index(type);
  2216. const int grid_size = iq2_grid_size(type);
  2217. if (iq2_data[gindex].grid) {
  2218. return;
  2219. }
  2220. static const uint16_t kgrid_2bit_256[256] = {
  2221. 0, 2, 5, 8, 10, 17, 20, 32, 34, 40, 42, 65, 68, 80, 88, 97,
  2222. 100, 128, 130, 138, 162, 257, 260, 272, 277, 320, 388, 408, 512, 514, 546, 642,
  2223. 1025, 1028, 1040, 1057, 1060, 1088, 1090, 1096, 1120, 1153, 1156, 1168, 1188, 1280, 1282, 1288,
  2224. 1312, 1350, 1385, 1408, 1425, 1545, 1552, 1600, 1668, 1700, 2048, 2053, 2056, 2068, 2088, 2113,
  2225. 2116, 2128, 2130, 2184, 2308, 2368, 2562, 2580, 4097, 4100, 4112, 4129, 4160, 4192, 4228, 4240,
  2226. 4245, 4352, 4360, 4384, 4432, 4442, 4480, 4644, 4677, 5120, 5128, 5152, 5157, 5193, 5248, 5400,
  2227. 5474, 5632, 5654, 6145, 6148, 6160, 6208, 6273, 6400, 6405, 6560, 6737, 8192, 8194, 8202, 8260,
  2228. 8289, 8320, 8322, 8489, 8520, 8704, 8706, 9217, 9220, 9232, 9280, 9302, 9472, 9537, 9572, 9872,
  2229. 10248, 10272, 10388, 10820, 16385, 16388, 16400, 16408, 16417, 16420, 16448, 16456, 16470, 16480, 16513, 16516,
  2230. 16528, 16640, 16672, 16737, 16768, 16773, 16897, 16912, 16968, 16982, 17000, 17408, 17416, 17440, 17536, 17561,
  2231. 17682, 17700, 17920, 18433, 18436, 18448, 18496, 18501, 18688, 18776, 18785, 18818, 19013, 19088, 20480, 20488,
  2232. 20497, 20505, 20512, 20608, 20616, 20740, 20802, 20900, 21137, 21648, 21650, 21770, 22017, 22100, 22528, 22545,
  2233. 22553, 22628, 22848, 23048, 24580, 24592, 24640, 24680, 24832, 24917, 25112, 25184, 25600, 25605, 25872, 25874,
  2234. 25988, 26690, 32768, 32770, 32778, 32833, 32898, 33028, 33048, 33088, 33297, 33793, 33796, 33808, 33813, 33856,
  2235. 33888, 34048, 34118, 34196, 34313, 34368, 34400, 34818, 35076, 35345, 36868, 36880, 36900, 36928, 37025, 37142,
  2236. 37248, 37445, 37888, 37922, 37956, 38225, 39041, 39200, 40962, 41040, 41093, 41225, 41472, 42008, 43088, 43268,
  2237. };
  2238. static const uint16_t kgrid_2bit_512[512] = {
  2239. 0, 2, 5, 8, 10, 17, 20, 22, 25, 32, 34, 37, 40, 65, 68, 70,
  2240. 73, 80, 82, 85, 88, 97, 100, 128, 130, 133, 136, 145, 148, 153, 160, 257,
  2241. 260, 262, 265, 272, 274, 277, 280, 282, 289, 292, 320, 322, 325, 328, 337, 340,
  2242. 352, 360, 385, 388, 400, 512, 514, 517, 520, 529, 532, 544, 577, 580, 592, 597,
  2243. 640, 650, 1025, 1028, 1030, 1033, 1040, 1042, 1045, 1048, 1057, 1060, 1088, 1090, 1093, 1096,
  2244. 1105, 1108, 1110, 1120, 1153, 1156, 1168, 1280, 1282, 1285, 1288, 1297, 1300, 1312, 1345, 1348,
  2245. 1360, 1377, 1408, 1537, 1540, 1552, 1574, 1600, 1602, 1668, 2048, 2050, 2053, 2056, 2058, 2065,
  2246. 2068, 2080, 2085, 2113, 2116, 2128, 2136, 2176, 2208, 2218, 2305, 2308, 2320, 2368, 2433, 2441,
  2247. 2560, 2592, 2600, 2710, 2720, 4097, 4100, 4102, 4105, 4112, 4114, 4117, 4120, 4129, 4132, 4160,
  2248. 4162, 4165, 4168, 4177, 4180, 4192, 4202, 4225, 4228, 4240, 4352, 4354, 4357, 4360, 4369, 4372,
  2249. 4384, 4417, 4420, 4432, 4480, 4500, 4502, 4609, 4612, 4614, 4624, 4672, 4704, 5120, 5122, 5125,
  2250. 5128, 5137, 5140, 5152, 5185, 5188, 5193, 5200, 5220, 5248, 5377, 5380, 5392, 5440, 5632, 5652,
  2251. 5705, 6145, 6148, 6160, 6162, 6208, 6228, 6278, 6400, 6405, 6502, 6737, 6825, 8192, 8194, 8197,
  2252. 8200, 8202, 8209, 8212, 8224, 8257, 8260, 8272, 8320, 8352, 8449, 8452, 8464, 8512, 8520, 8549,
  2253. 8704, 8738, 8832, 8872, 9217, 9220, 9232, 9257, 9280, 9472, 9537, 9554, 9625, 9729, 9754, 9894,
  2254. 10240, 10248, 10250, 10272, 10325, 10376, 10402, 10600, 10640, 10760, 10784, 10882, 10888, 10890, 16385, 16388,
  2255. 16390, 16393, 16400, 16402, 16405, 16408, 16417, 16420, 16448, 16450, 16453, 16456, 16458, 16465, 16468, 16480,
  2256. 16485, 16513, 16516, 16528, 16640, 16642, 16645, 16648, 16657, 16660, 16672, 16705, 16708, 16720, 16768, 16773,
  2257. 16802, 16897, 16900, 16912, 16914, 16937, 16960, 17408, 17410, 17413, 17416, 17425, 17428, 17433, 17440, 17473,
  2258. 17476, 17488, 17536, 17556, 17665, 17668, 17680, 17700, 17728, 17818, 17920, 17930, 17988, 18000, 18433, 18436,
  2259. 18448, 18496, 18501, 18516, 18530, 18688, 18705, 18756, 18768, 18793, 18948, 20480, 20482, 20485, 20488, 20497,
  2260. 20500, 20512, 20520, 20545, 20548, 20560, 20608, 20737, 20740, 20752, 20757, 20800, 20802, 20992, 21060, 21162,
  2261. 21505, 21508, 21520, 21537, 21568, 21600, 21633, 21665, 21760, 21768, 21888, 21896, 22049, 22120, 22177, 22528,
  2262. 22548, 22593, 22608, 22681, 22810, 22848, 22850, 23173, 24577, 24580, 24592, 24640, 24660, 24674, 24710, 24745,
  2263. 24832, 25124, 25162, 25234, 25600, 25622, 25872, 25920, 25925, 26020, 26625, 26730, 26917, 27142, 27220, 27234,
  2264. 32768, 32770, 32773, 32776, 32785, 32788, 32800, 32810, 32833, 32836, 32848, 32896, 32898, 32936, 32938, 33025,
  2265. 33028, 33030, 33040, 33088, 33105, 33113, 33280, 33312, 33408, 33410, 33440, 33448, 33793, 33796, 33808, 33810,
  2266. 33813, 33856, 33888, 33929, 34048, 34116, 34213, 34328, 34410, 34816, 34824, 34853, 34906, 34944, 34946, 34984,
  2267. 35078, 35362, 35456, 35464, 35478, 35496, 36865, 36868, 36880, 36928, 36950, 36996, 37120, 37154, 37220, 37462,
  2268. 37513, 37888, 37893, 37956, 37968, 37976, 38185, 38288, 38290, 38465, 38993, 39078, 39241, 39445, 39520, 40960,
  2269. 40962, 40968, 40970, 40992, 41002, 41120, 41297, 41305, 41382, 41472, 41474, 41480, 41514, 41600, 41632, 42048,
  2270. 42133, 42597, 42648, 43018, 43040, 43042, 43048, 43168, 43176, 43268, 43396, 43398, 43560, 43562, 43665, 43690,
  2271. };
  2272. static const uint16_t kgrid_1bit_2048[NGRID_IQ1S] = {
  2273. 0, 2, 5, 8, 10, 17, 21, 32, 34, 40, 42, 69, 81, 84, 86, 101,
  2274. 128, 130, 136, 138, 149, 160, 162, 168, 170, 260, 261, 273, 276, 278, 281, 282,
  2275. 293, 321, 326, 329, 338, 341, 346, 353, 356, 358, 360, 389, 401, 404, 406, 421,
  2276. 512, 514, 520, 522, 533, 544, 546, 552, 554, 581, 593, 601, 612, 617, 640, 642,
  2277. 648, 650, 657, 661, 665, 672, 674, 680, 682, 1041, 1044, 1046, 1061, 1089, 1097, 1109,
  2278. 1114, 1124, 1125, 1169, 1177, 1189, 1281, 1284, 1285, 1286, 1301, 1304, 1306, 1321, 1344, 1349,
  2279. 1354, 1360, 1361, 1364, 1365, 1366, 1369, 1376, 1378, 1381, 1384, 1386, 1409, 1425, 1429, 1432,
  2280. 1434, 1441, 1444, 1445, 1446, 1449, 1556, 1561, 1601, 1604, 1616, 1618, 1621, 1624, 1632, 1633,
  2281. 1638, 1641, 1669, 1681, 1684, 1689, 2048, 2050, 2056, 2058, 2069, 2080, 2082, 2088, 2090, 2117,
  2282. 2129, 2134, 2149, 2176, 2178, 2184, 2186, 2197, 2208, 2210, 2216, 2218, 2309, 2321, 2324, 2329,
  2283. 2340, 2341, 2369, 2384, 2385, 2389, 2401, 2404, 2409, 2449, 2452, 2454, 2457, 2469, 2560, 2562,
  2284. 2568, 2570, 2581, 2592, 2594, 2600, 2602, 2629, 2641, 2649, 2657, 2661, 2688, 2690, 2693, 2696,
  2285. 2698, 2709, 2720, 2722, 2728, 2730, 4112, 4113, 4116, 4121, 4132, 4133, 4161, 4164, 4176, 4181,
  2286. 4184, 4193, 4196, 4197, 4201, 4241, 4244, 4246, 4257, 4261, 4353, 4356, 4358, 4361, 4368, 4370,
  2287. 4373, 4376, 4385, 4388, 4393, 4421, 4426, 4432, 4433, 4434, 4436, 4437, 4438, 4441, 4448, 4453,
  2288. 4484, 4498, 4501, 4513, 4516, 4625, 4628, 4630, 4645, 4672, 4678, 4681, 4690, 4693, 4696, 4698,
  2289. 4708, 4710, 4741, 4753, 4756, 4758, 4773, 5121, 5126, 5129, 5140, 5141, 5144, 5145, 5153, 5158,
  2290. 5185, 5189, 5190, 5192, 5194, 5201, 5204, 5205, 5206, 5209, 5218, 5221, 5224, 5252, 5257, 5264,
  2291. 5268, 5269, 5272, 5273, 5274, 5281, 5284, 5285, 5289, 5378, 5381, 5386, 5393, 5396, 5397, 5398,
  2292. 5401, 5408, 5410, 5413, 5416, 5418, 5441, 5444, 5445, 5446, 5457, 5458, 5460, 5461, 5462, 5465,
  2293. 5466, 5473, 5476, 5477, 5478, 5481, 5504, 5506, 5508, 5509, 5512, 5514, 5520, 5521, 5524, 5525,
  2294. 5526, 5529, 5530, 5536, 5538, 5541, 5633, 5636, 5637, 5638, 5653, 5654, 5656, 5658, 5665, 5670,
  2295. 5696, 5698, 5700, 5701, 5704, 5706, 5713, 5717, 5718, 5720, 5721, 5729, 5732, 5733, 5736, 5737,
  2296. 5738, 5766, 5770, 5778, 5781, 5796, 5801, 6161, 6166, 6181, 6209, 6212, 6214, 6217, 6224, 6229,
  2297. 6232, 6234, 6240, 6241, 6244, 6246, 6249, 6277, 6289, 6292, 6309, 6416, 6418, 6421, 6426, 6433,
  2298. 6437, 6466, 6468, 6469, 6472, 6481, 6484, 6485, 6486, 6489, 6490, 6496, 6501, 6506, 6537, 6545,
  2299. 6546, 6549, 6552, 6561, 6566, 6569, 6665, 6678, 6692, 6694, 6724, 6726, 6729, 6736, 6738, 6741,
  2300. 6744, 6753, 6758, 6761, 6789, 6801, 6806, 6810, 8192, 8194, 8200, 8202, 8213, 8224, 8226, 8229,
  2301. 8232, 8234, 8261, 8273, 8281, 8289, 8293, 8320, 8322, 8328, 8330, 8341, 8352, 8354, 8357, 8360,
  2302. 8362, 8453, 8465, 8468, 8473, 8485, 8514, 8516, 8521, 8533, 8536, 8538, 8545, 8548, 8549, 8550,
  2303. 8581, 8592, 8598, 8601, 8613, 8705, 8712, 8714, 8721, 8725, 8736, 8738, 8744, 8746, 8773, 8785,
  2304. 8790, 8793, 8805, 8833, 8840, 8842, 8849, 8853, 8864, 8866, 8872, 8874, 9221, 9236, 9238, 9241,
  2305. 9253, 9284, 9285, 9286, 9289, 9298, 9301, 9304, 9306, 9318, 9349, 9361, 9364, 9369, 9377, 9381,
  2306. 9481, 9493, 9505, 9513, 9536, 9541, 9544, 9553, 9556, 9557, 9561, 9570, 9573, 9576, 9609, 9616,
  2307. 9620, 9621, 9624, 9626, 9633, 9636, 9638, 9641, 9733, 9744, 9746, 9753, 9765, 9793, 9801, 9813,
  2308. 9824, 9825, 9833, 9860, 9862, 9872, 9882, 10240, 10242, 10248, 10250, 10261, 10272, 10274, 10280, 10282,
  2309. 10309, 10321, 10324, 10341, 10368, 10370, 10376, 10378, 10400, 10402, 10408, 10410, 10505, 10513, 10516, 10521,
  2310. 10533, 10566, 10569, 10578, 10581, 10593, 10596, 10598, 10601, 10629, 10640, 10646, 10649, 10660, 10661, 10752,
  2311. 10754, 10760, 10762, 10784, 10786, 10792, 10794, 10821, 10833, 10838, 10841, 10853, 10880, 10882, 10888, 10890,
  2312. 10901, 10912, 10914, 10920, 10922, 16389, 16401, 16406, 16421, 16457, 16466, 16469, 16472, 16474, 16481, 16484,
  2313. 16486, 16532, 16537, 16545, 16550, 16640, 16641, 16644, 16646, 16649, 16658, 16661, 16662, 16664, 16666, 16673,
  2314. 16678, 16681, 16709, 16712, 16714, 16721, 16724, 16725, 16726, 16729, 16730, 16741, 16744, 16746, 16769, 16772,
  2315. 16774, 16784, 16786, 16789, 16800, 16801, 16802, 16901, 16913, 16916, 16918, 16933, 16961, 16978, 16981, 16986,
  2316. 16996, 17001, 17033, 17044, 17061, 17409, 17429, 17433, 17449, 17477, 17480, 17482, 17489, 17492, 17493, 17494,
  2317. 17505, 17506, 17509, 17512, 17514, 17537, 17542, 17545, 17552, 17554, 17557, 17568, 17569, 17577, 17665, 17666,
  2318. 17669, 17674, 17681, 17684, 17685, 17686, 17689, 17696, 17701, 17706, 17729, 17732, 17733, 17734, 17737, 17744,
  2319. 17745, 17748, 17749, 17750, 17752, 17753, 17761, 17764, 17765, 17766, 17769, 17794, 17796, 17797, 17800, 17809,
  2320. 17812, 17813, 17814, 17817, 17818, 17829, 17832, 17834, 17921, 17925, 17929, 17940, 17941, 17944, 17946, 17953,
  2321. 17956, 17961, 17984, 17986, 17989, 17992, 18000, 18001, 18002, 18005, 18006, 18009, 18018, 18021, 18024, 18049,
  2322. 18053, 18058, 18068, 18069, 18081, 18084, 18086, 18437, 18449, 18453, 18458, 18469, 18498, 18505, 18512, 18517,
  2323. 18520, 18529, 18532, 18534, 18537, 18565, 18577, 18580, 18582, 18585, 18597, 18689, 18693, 18694, 18698, 18704,
  2324. 18708, 18709, 18712, 18721, 18724, 18726, 18752, 18757, 18762, 18769, 18770, 18772, 18773, 18774, 18777, 18784,
  2325. 18786, 18789, 18790, 18794, 18822, 18825, 18834, 18837, 18838, 18840, 18849, 18852, 18854, 18857, 18966, 19012,
  2326. 19014, 19017, 19029, 19032, 19034, 19044, 19049, 19092, 19109, 20481, 20484, 20485, 20486, 20489, 20498, 20501,
  2327. 20506, 20513, 20516, 20521, 20544, 20549, 20552, 20561, 20564, 20565, 20566, 20569, 20581, 20584, 20614, 20617,
  2328. 20629, 20632, 20640, 20641, 20646, 20649, 20741, 20744, 20745, 20746, 20753, 20756, 20757, 20758, 20760, 20761,
  2329. 20768, 20773, 20774, 20776, 20778, 20801, 20804, 20805, 20806, 20809, 20816, 20817, 20818, 20820, 20821, 20822,
  2330. 20824, 20825, 20826, 20833, 20836, 20837, 20838, 20841, 20866, 20869, 20881, 20884, 20885, 20886, 20889, 20896,
  2331. 20901, 20906, 20993, 20998, 21010, 21013, 21018, 21025, 21028, 21058, 21061, 21066, 21073, 21076, 21077, 21078,
  2332. 21081, 21090, 21093, 21125, 21136, 21138, 21141, 21145, 21146, 21156, 21508, 21509, 21521, 21524, 21525, 21526,
  2333. 21528, 21529, 21537, 21541, 21544, 21546, 21569, 21572, 21573, 21574, 21577, 21578, 21584, 21585, 21588, 21589,
  2334. 21590, 21592, 21593, 21594, 21601, 21602, 21604, 21605, 21606, 21609, 21632, 21640, 21642, 21649, 21652, 21653,
  2335. 21654, 21657, 21665, 21668, 21669, 21674, 21761, 21762, 21764, 21765, 21766, 21769, 21776, 21777, 21778, 21780,
  2336. 21781, 21782, 21785, 21786, 21793, 21796, 21797, 21798, 21801, 21824, 21825, 21826, 21828, 21829, 21830, 21832,
  2337. 21833, 21840, 21841, 21842, 21844, 21845, 21846, 21848, 21849, 21850, 21856, 21857, 21860, 21861, 21862, 21864,
  2338. 21865, 21866, 21889, 21892, 21893, 21897, 21898, 21904, 21905, 21908, 21909, 21910, 21912, 21913, 21921, 21924,
  2339. 21925, 21926, 21929, 22016, 22017, 22018, 22020, 22022, 22024, 22025, 22033, 22036, 22037, 22040, 22041, 22048,
  2340. 22049, 22050, 22052, 22053, 22054, 22056, 22057, 22081, 22085, 22086, 22088, 22089, 22090, 22096, 22097, 22098,
  2341. 22100, 22101, 22102, 22104, 22105, 22106, 22113, 22116, 22117, 22121, 22146, 22149, 22150, 22152, 22153, 22154,
  2342. 22161, 22165, 22170, 22178, 22181, 22182, 22184, 22185, 22532, 22533, 22534, 22537, 22544, 22549, 22552, 22561,
  2343. 22570, 22597, 22600, 22602, 22609, 22612, 22613, 22614, 22616, 22617, 22624, 22626, 22628, 22629, 22658, 22665,
  2344. 22672, 22674, 22677, 22680, 22689, 22697, 22785, 22786, 22789, 22794, 22801, 22804, 22805, 22806, 22809, 22821,
  2345. 22849, 22852, 22853, 22854, 22857, 22864, 22865, 22866, 22868, 22869, 22870, 22872, 22873, 22874, 22881, 22884,
  2346. 22885, 22886, 22889, 22913, 22917, 22921, 22929, 22932, 22933, 22934, 22936, 22937, 22949, 23044, 23048, 23061,
  2347. 23066, 23072, 23077, 23078, 23081, 23109, 23112, 23113, 23121, 23125, 23126, 23128, 23129, 23138, 23141, 23144,
  2348. 23146, 23169, 23178, 23186, 23189, 23190, 23192, 23194, 23201, 24581, 24596, 24598, 24601, 24613, 24644, 24656,
  2349. 24661, 24662, 24664, 24666, 24673, 24676, 24678, 24681, 24705, 24726, 24741, 24833, 24836, 24838, 24841, 24850,
  2350. 24853, 24865, 24866, 24870, 24873, 24901, 24905, 24913, 24917, 24918, 24921, 24933, 24934, 24938, 24964, 24970,
  2351. 24978, 24981, 24993, 24998, 25001, 25105, 25110, 25113, 25152, 25153, 25158, 25173, 25174, 25176, 25184, 25221,
  2352. 25233, 25238, 25253, 25617, 25618, 25621, 25622, 25626, 25633, 25638, 25641, 25664, 25666, 25669, 25672, 25674,
  2353. 25681, 25684, 25685, 25686, 25689, 25690, 25696, 25698, 25701, 25732, 25733, 25737, 25744, 25746, 25748, 25749,
  2354. 25750, 25752, 25754, 25761, 25764, 25769, 25861, 25864, 25866, 25873, 25877, 25878, 25881, 25924, 25925, 25926,
  2355. 25929, 25936, 25937, 25940, 25941, 25942, 25945, 25953, 25956, 25957, 25958, 25961, 25990, 25993, 25994, 26001,
  2356. 26005, 26006, 26009, 26010, 26018, 26021, 26022, 26024, 26114, 26121, 26133, 26144, 26150, 26152, 26153, 26176,
  2357. 26181, 26184, 26186, 26193, 26196, 26197, 26198, 26200, 26202, 26208, 26213, 26216, 26240, 26242, 26245, 26250,
  2358. 26260, 26262, 26264, 26265, 26272, 26276, 26278, 26282, 26646, 26649, 26661, 26689, 26706, 26709, 26714, 26721,
  2359. 26729, 26757, 26769, 26776, 26790, 26881, 26884, 26896, 26901, 26913, 26916, 26918, 26921, 26944, 26945, 26949,
  2360. 26950, 26952, 26961, 26964, 26965, 26966, 26969, 26976, 26981, 26986, 27010, 27012, 27018, 27029, 27041, 27044,
  2361. 27045, 27049, 27153, 27158, 27160, 27201, 27204, 27209, 27216, 27221, 27224, 27226, 27236, 27237, 27241, 27270,
  2362. 27284, 27288, 27290, 27302, 32768, 32770, 32776, 32778, 32800, 32802, 32808, 32810, 32837, 32848, 32849, 32852,
  2363. 32854, 32857, 32869, 32896, 32898, 32904, 32906, 32917, 32928, 32930, 32936, 32938, 33029, 33041, 33044, 33046,
  2364. 33049, 33061, 33089, 33092, 33097, 33104, 33106, 33109, 33110, 33112, 33113, 33124, 33126, 33129, 33157, 33161,
  2365. 33172, 33174, 33177, 33189, 33280, 33282, 33288, 33290, 33301, 33312, 33314, 33320, 33322, 33361, 33364, 33369,
  2366. 33381, 33408, 33410, 33416, 33418, 33429, 33440, 33442, 33448, 33450, 33812, 33817, 33857, 33860, 33873, 33877,
  2367. 33882, 33889, 33892, 33897, 33940, 33945, 34049, 34057, 34066, 34069, 34074, 34086, 34089, 34112, 34113, 34117,
  2368. 34120, 34129, 34132, 34133, 34134, 34137, 34138, 34149, 34150, 34152, 34154, 34177, 34180, 34182, 34185, 34192,
  2369. 34194, 34197, 34200, 34214, 34321, 34326, 34329, 34341, 34369, 34372, 34377, 34378, 34384, 34389, 34393, 34394,
  2370. 34401, 34406, 34410, 34437, 34449, 34458, 34468, 34816, 34818, 34824, 34826, 34837, 34848, 34850, 34856, 34858,
  2371. 34881, 34885, 34897, 34900, 34905, 34917, 34921, 34944, 34946, 34952, 34954, 34965, 34976, 34978, 34984, 34986,
  2372. 35077, 35078, 35089, 35092, 35094, 35109, 35137, 35140, 35142, 35145, 35152, 35154, 35157, 35162, 35169, 35172,
  2373. 35205, 35222, 35225, 35237, 35328, 35330, 35336, 35338, 35349, 35360, 35362, 35368, 35370, 35397, 35409, 35412,
  2374. 35414, 35456, 35458, 35464, 35466, 35477, 35488, 35490, 35496, 35498, 36869, 36881, 36886, 36888, 36889, 36901,
  2375. 36929, 36934, 36937, 36949, 36952, 36954, 36969, 36970, 36997, 37009, 37012, 37014, 37017, 37029, 37121, 37124,
  2376. 37126, 37129, 37136, 37141, 37144, 37146, 37153, 37156, 37158, 37161, 37184, 37189, 37200, 37201, 37204, 37205,
  2377. 37206, 37209, 37218, 37221, 37252, 37254, 37266, 37269, 37272, 37281, 37284, 37286, 37289, 37381, 37393, 37396,
  2378. 37401, 37413, 37444, 37446, 37449, 37456, 37458, 37461, 37464, 37478, 37481, 37509, 37524, 37526, 37545, 37889,
  2379. 37892, 37894, 37904, 37909, 37912, 37926, 37952, 37962, 37969, 37972, 37973, 37974, 37976, 37977, 37984, 37985,
  2380. 37986, 37989, 38020, 38022, 38034, 38036, 38037, 38040, 38049, 38057, 38144, 38149, 38152, 38154, 38160, 38161,
  2381. 38164, 38165, 38166, 38169, 38177, 38181, 38185, 38186, 38209, 38212, 38213, 38214, 38217, 38224, 38225, 38226,
  2382. 38228, 38229, 38230, 38232, 38233, 38234, 38241, 38244, 38245, 38246, 38249, 38273, 38277, 38280, 38289, 38290,
  2383. 38292, 38293, 38294, 38297, 38298, 38304, 38306, 38309, 38312, 38314, 38401, 38404, 38416, 38421, 38425, 38432,
  2384. 38438, 38441, 38469, 38472, 38473, 38481, 38482, 38485, 38486, 38489, 38501, 38504, 38530, 38532, 38537, 38538,
  2385. 38546, 38548, 38549, 38564, 38566, 38569, 38917, 38934, 38937, 38949, 38977, 38982, 38992, 38994, 38997, 38998,
  2386. 39002, 39012, 39013, 39045, 39057, 39062, 39065, 39077, 39172, 39174, 39177, 39184, 39186, 39189, 39192, 39194,
  2387. 39200, 39201, 39204, 39206, 39232, 39234, 39237, 39240, 39242, 39249, 39252, 39253, 39254, 39257, 39266, 39269,
  2388. 39270, 39274, 39297, 39300, 39312, 39314, 39317, 39322, 39329, 39334, 39429, 39445, 39461, 39492, 39494, 39497,
  2389. 39504, 39509, 39512, 39521, 39557, 39569, 39572, 39573, 39574, 40960, 40962, 40968, 40970, 40981, 40992, 40994,
  2390. 41000, 41002, 41029, 41041, 41044, 41046, 41049, 41088, 41090, 41096, 41098, 41109, 41120, 41122, 41128, 41130,
  2391. 41221, 41225, 41233, 41236, 41238, 41241, 41242, 41286, 41289, 41297, 41301, 41304, 41306, 41313, 41316, 41349,
  2392. 41360, 41362, 41366, 41369, 41474, 41480, 41482, 41488, 41497, 41506, 41512, 41514, 41541, 41553, 41558, 41561,
  2393. 41573, 41600, 41602, 41608, 41610, 41621, 41632, 41634, 41640, 41642, 42009, 42021, 42049, 42052, 42064, 42068,
  2394. 42069, 42072, 42074, 42081, 42085, 42086, 42088, 42089, 42117, 42246, 42249, 42256, 42258, 42261, 42264, 42278,
  2395. 42281, 42306, 42309, 42321, 42324, 42325, 42326, 42329, 42341, 42346, 42369, 42372, 42373, 42374, 42377, 42386,
  2396. 42389, 42392, 42501, 42513, 42518, 42522, 42529, 42533, 42564, 42566, 42570, 42578, 42581, 42582, 42584, 42592,
  2397. 42594, 42630, 42640, 42645, 42646, 42649, 42657, 42660, 42662, 43008, 43010, 43016, 43018, 43040, 43042, 43048,
  2398. 43050, 43089, 43092, 43094, 43097, 43136, 43138, 43144, 43146, 43157, 43168, 43170, 43176, 43178, 43269, 43284,
  2399. 43289, 43297, 43301, 43329, 43344, 43349, 43354, 43361, 43366, 43369, 43408, 43414, 43520, 43522, 43528, 43530,
  2400. 43552, 43554, 43560, 43562, 43601, 43604, 43606, 43648, 43650, 43656, 43658, 43669, 43680, 43682, 43688, 43690,
  2401. };
  2402. static const uint16_t kgrid_2bit_1024[1024] = {
  2403. 0, 2, 5, 8, 10, 17, 20, 22, 25, 32, 34, 37, 40, 65, 68, 70,
  2404. 73, 80, 82, 85, 88, 97, 100, 102, 105, 128, 130, 133, 136, 145, 148, 160,
  2405. 165, 170, 257, 260, 262, 265, 272, 274, 277, 280, 289, 292, 320, 322, 325, 328,
  2406. 337, 340, 342, 345, 352, 357, 360, 385, 388, 400, 402, 405, 417, 420, 512, 514,
  2407. 517, 520, 529, 532, 544, 554, 577, 580, 582, 585, 592, 597, 640, 645, 650, 660,
  2408. 674, 1025, 1028, 1030, 1033, 1040, 1042, 1045, 1048, 1057, 1060, 1062, 1065, 1088, 1090, 1093,
  2409. 1096, 1098, 1105, 1108, 1110, 1113, 1120, 1122, 1125, 1153, 1156, 1158, 1161, 1168, 1173, 1176,
  2410. 1185, 1188, 1280, 1282, 1285, 1288, 1290, 1297, 1300, 1302, 1305, 1312, 1317, 1320, 1345, 1348,
  2411. 1350, 1353, 1360, 1362, 1365, 1368, 1377, 1380, 1408, 1410, 1413, 1416, 1425, 1428, 1440, 1537,
  2412. 1540, 1542, 1545, 1552, 1557, 1600, 1605, 1608, 1617, 1620, 1632, 1665, 1668, 1680, 2048, 2050,
  2413. 2053, 2056, 2065, 2068, 2070, 2073, 2080, 2085, 2090, 2113, 2116, 2118, 2121, 2128, 2130, 2133,
  2414. 2136, 2145, 2148, 2176, 2181, 2196, 2218, 2305, 2308, 2320, 2322, 2325, 2328, 2337, 2368, 2373,
  2415. 2376, 2385, 2388, 2400, 2433, 2448, 2560, 2577, 2580, 2594, 2600, 2602, 2640, 2713, 4097, 4100,
  2416. 4102, 4105, 4112, 4114, 4117, 4120, 4129, 4132, 4134, 4160, 4162, 4165, 4168, 4177, 4180, 4182,
  2417. 4185, 4192, 4194, 4197, 4200, 4225, 4228, 4230, 4240, 4245, 4248, 4257, 4260, 4352, 4354, 4357,
  2418. 4360, 4362, 4369, 4372, 4374, 4377, 4384, 4386, 4389, 4392, 4417, 4420, 4422, 4425, 4432, 4434,
  2419. 4437, 4440, 4449, 4452, 4480, 4482, 4485, 4488, 4497, 4500, 4609, 4612, 4617, 4624, 4629, 4641,
  2420. 4644, 4672, 4677, 4689, 4692, 4737, 4740, 4752, 5120, 5122, 5125, 5128, 5137, 5140, 5142, 5145,
  2421. 5152, 5157, 5160, 5185, 5188, 5190, 5193, 5200, 5202, 5205, 5208, 5217, 5220, 5248, 5250, 5253,
  2422. 5256, 5265, 5268, 5280, 5377, 5380, 5382, 5385, 5392, 5394, 5397, 5400, 5409, 5412, 5440, 5442,
  2423. 5445, 5448, 5457, 5460, 5472, 5505, 5508, 5520, 5632, 5637, 5640, 5649, 5652, 5664, 5697, 5700,
  2424. 5712, 5760, 5802, 6145, 6148, 6150, 6153, 6160, 6165, 6168, 6177, 6208, 6210, 6213, 6216, 6225,
  2425. 6228, 6240, 6273, 6276, 6400, 6402, 6405, 6408, 6417, 6420, 6432, 6465, 6468, 6480, 6505, 6562,
  2426. 6660, 6672, 6720, 6742, 8192, 8194, 8197, 8200, 8209, 8212, 8214, 8217, 8224, 8229, 8234, 8257,
  2427. 8260, 8272, 8274, 8277, 8292, 8320, 8330, 8340, 8362, 8449, 8452, 8464, 8466, 8469, 8481, 8512,
  2428. 8514, 8517, 8529, 8532, 8544, 8577, 8580, 8592, 8704, 8714, 8738, 8744, 8746, 8772, 8784, 8840,
  2429. 8842, 8872, 9217, 9220, 9222, 9225, 9232, 9237, 9240, 9249, 9252, 9280, 9282, 9285, 9288, 9297,
  2430. 9300, 9312, 9345, 9348, 9360, 9472, 9477, 9480, 9489, 9492, 9504, 9537, 9540, 9552, 9574, 9600,
  2431. 9729, 9732, 9744, 9792, 9817, 10240, 10245, 10257, 10260, 10305, 10308, 10320, 10378, 10410, 10497, 10500,
  2432. 10512, 10645, 10762, 10786, 10852, 10888, 10890, 16385, 16388, 16390, 16393, 16400, 16402, 16405, 16408, 16410,
  2433. 16417, 16420, 16422, 16448, 16450, 16453, 16456, 16458, 16465, 16468, 16470, 16473, 16480, 16482, 16485, 16513,
  2434. 16516, 16528, 16533, 16536, 16545, 16548, 16640, 16642, 16645, 16648, 16657, 16660, 16662, 16665, 16672, 16674,
  2435. 16677, 16705, 16708, 16710, 16713, 16720, 16722, 16725, 16728, 16737, 16740, 16768, 16770, 16773, 16776, 16785,
  2436. 16788, 16800, 16897, 16900, 16912, 16914, 16917, 16920, 16932, 16960, 16965, 16968, 16977, 16980, 16992, 17025,
  2437. 17028, 17408, 17410, 17413, 17416, 17418, 17425, 17428, 17430, 17433, 17440, 17442, 17445, 17448, 17473, 17476,
  2438. 17478, 17481, 17488, 17490, 17493, 17496, 17505, 17508, 17536, 17538, 17541, 17544, 17553, 17556, 17568, 17665,
  2439. 17668, 17670, 17673, 17680, 17682, 17685, 17688, 17697, 17700, 17728, 17730, 17733, 17736, 17745, 17748, 17760,
  2440. 17770, 17793, 17796, 17808, 17920, 17922, 17925, 17928, 17937, 17940, 17952, 17985, 17988, 18000, 18048, 18085,
  2441. 18433, 18436, 18441, 18448, 18450, 18453, 18456, 18465, 18468, 18496, 18498, 18501, 18504, 18513, 18516, 18528,
  2442. 18564, 18576, 18688, 18690, 18693, 18696, 18705, 18708, 18720, 18753, 18756, 18768, 18816, 18838, 18945, 18948,
  2443. 18960, 19008, 20480, 20482, 20485, 20488, 20497, 20500, 20502, 20505, 20512, 20514, 20517, 20520, 20545, 20548,
  2444. 20550, 20553, 20560, 20562, 20565, 20568, 20577, 20580, 20608, 20610, 20613, 20616, 20625, 20628, 20737, 20740,
  2445. 20742, 20745, 20752, 20754, 20757, 20760, 20769, 20772, 20800, 20802, 20805, 20808, 20817, 20820, 20832, 20865,
  2446. 20868, 20880, 20992, 20997, 21000, 21009, 21012, 21024, 21057, 21060, 21072, 21097, 21120, 21505, 21508, 21510,
  2447. 21513, 21520, 21522, 21525, 21528, 21537, 21540, 21568, 21570, 21573, 21576, 21585, 21588, 21600, 21633, 21636,
  2448. 21648, 21760, 21762, 21765, 21768, 21777, 21780, 21792, 21825, 21828, 21840, 21888, 22017, 22020, 22032, 22054,
  2449. 22080, 22528, 22530, 22533, 22536, 22545, 22548, 22560, 22593, 22596, 22608, 22618, 22656, 22785, 22788, 22800,
  2450. 22848, 23040, 23065, 23173, 23208, 24577, 24580, 24582, 24592, 24594, 24597, 24600, 24609, 24612, 24640, 24645,
  2451. 24648, 24657, 24660, 24672, 24708, 24720, 24832, 24834, 24837, 24840, 24849, 24852, 24864, 24897, 24900, 24912,
  2452. 24960, 24985, 25092, 25104, 25152, 25174, 25249, 25600, 25605, 25608, 25617, 25620, 25632, 25665, 25668, 25680,
  2453. 25728, 25857, 25860, 25872, 25920, 25930, 25960, 26002, 26112, 26260, 26625, 26628, 26640, 26725, 26776, 26880,
  2454. 26922, 27202, 27297, 32768, 32770, 32773, 32776, 32785, 32788, 32793, 32800, 32805, 32833, 32836, 32848, 32850,
  2455. 32853, 32856, 32865, 32896, 32901, 32913, 32916, 33025, 33028, 33033, 33040, 33042, 33045, 33048, 33057, 33060,
  2456. 33088, 33090, 33093, 33096, 33105, 33108, 33153, 33156, 33168, 33193, 33280, 33285, 33290, 33297, 33300, 33345,
  2457. 33348, 33360, 33793, 33796, 33798, 33801, 33808, 33810, 33813, 33816, 33825, 33856, 33858, 33861, 33864, 33873,
  2458. 33876, 33888, 33921, 33924, 33936, 34048, 34050, 34053, 34056, 34065, 34068, 34080, 34113, 34116, 34128, 34176,
  2459. 34186, 34305, 34308, 34320, 34345, 34368, 34816, 34821, 34833, 34836, 34881, 34884, 34896, 34978, 35073, 35076,
  2460. 35136, 35173, 35362, 35416, 35418, 35458, 35490, 36865, 36868, 36873, 36880, 36882, 36885, 36888, 36900, 36928,
  2461. 36930, 36933, 36936, 36945, 36948, 36960, 36993, 36996, 37008, 37120, 37125, 37137, 37140, 37185, 37188, 37200,
  2462. 37210, 37377, 37380, 37392, 37440, 37542, 37888, 37890, 37893, 37896, 37905, 37908, 37920, 37953, 37956, 37968,
  2463. 38016, 38038, 38145, 38148, 38160, 38208, 38296, 38305, 38400, 38470, 38500, 38913, 38916, 38928, 38950, 38976,
  2464. 39081, 39168, 39241, 39250, 39568, 40960, 40965, 40970, 40980, 40994, 41002, 41025, 41028, 41040, 41122, 41130,
  2465. 41280, 41317, 41474, 41482, 41506, 41512, 41514, 41602, 41608, 41610, 41640, 41985, 41988, 42000, 42048, 42121,
  2466. 42148, 42240, 42265, 42577, 43018, 43048, 43170, 43348, 43398, 43528, 43530, 43552, 43554, 43560, 43656, 43690,
  2467. };
  2468. const int kmap_size = 43692;
  2469. //const int nwant = type == GGML_TYPE_IQ1_S ? 3 : 2;
  2470. const int nwant = type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M ? 3 : type == GGML_TYPE_IQ2_S ? 1 : 2;
  2471. const uint16_t * kgrid = type == GGML_TYPE_IQ2_XXS ? kgrid_2bit_256 :
  2472. type == GGML_TYPE_IQ2_XS ? kgrid_2bit_512 :
  2473. type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M ? kgrid_1bit_2048 : kgrid_2bit_1024;
  2474. uint64_t * kgrid_q2xs;
  2475. int * kmap_q2xs;
  2476. uint16_t * kneighbors_q2xs;
  2477. //printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size);
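// Unpack each 16-bit grid entry into 8 signed lattice coordinates: every 2-bit
// field l is mapped to the odd value 2*l + 1, i.e. {1, 3, 5, 7}, so one grid
// point occupies a single uint64_t (one int8_t per coordinate).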
  2478. uint64_t * the_grid = (uint64_t *)malloc(grid_size*sizeof(uint64_t));
  2479. for (int k = 0; k < grid_size; ++k) {
  2480. int8_t * pos = (int8_t *)(the_grid + k);
  2481. for (int i = 0; i < 8; ++i) {
  2482. int l = (kgrid[k] >> 2*i) & 0x3;
  2483. pos[i] = 2*l + 1;
  2484. }
  2485. }
  2486. kgrid_q2xs = the_grid;
  2487. iq2_data[gindex].grid = the_grid;
  2488. kmap_q2xs = (int *)malloc(kmap_size*sizeof(int));
  2489. iq2_data[gindex].map = kmap_q2xs;
  2490. for (int i = 0; i < kmap_size; ++i) kmap_q2xs[i] = -1;
  2491. uint64_t aux64;
  2492. uint8_t * aux8 = (uint8_t *)&aux64;
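// Build the inverse map: for every packed 16-bit index that corresponds to a
// grid point, kmap_q2xs[index] stores its grid position; all other entries
// stay at -1 and are filled with neighbour information below.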
  2493. for (int i = 0; i < grid_size; ++i) {
  2494. aux64 = kgrid_q2xs[i];
  2495. uint16_t index = 0;
  2496. for (int k=0; k<8; ++k) {
  2497. uint16_t q = (aux8[k] - 1)/2;
  2498. index |= (q << 2*k);
  2499. }
  2500. kmap_q2xs[index] = i;
  2501. }
  2502. int8_t pos[8];
  2503. int * dist2 = (int *)malloc(2*grid_size*sizeof(int));
  2504. int num_neighbors = 0, num_not_in_map = 0;
  2505. for (int i = 0; i < kmap_size; ++i) {
  2506. if (kmap_q2xs[i] >= 0) continue;
  2507. ++num_not_in_map;
  2508. for (int k = 0; k < 8; ++k) {
  2509. int l = (i >> 2*k) & 0x3;
  2510. pos[k] = 2*l + 1;
  2511. }
  2512. for (int j = 0; j < grid_size; ++j) {
  2513. const int8_t * pg = (const int8_t *)(kgrid_q2xs + j);
  2514. int d2 = 0;
  2515. for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
  2516. dist2[2*j+0] = d2;
  2517. dist2[2*j+1] = j;
  2518. }
  2519. qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func);
  2520. int n = 0; int d2 = dist2[0];
  2521. int nhave = 1;
  2522. for (int j = 0; j < grid_size; ++j) {
  2523. if (dist2[2*j] > d2) {
  2524. if (nhave == nwant) break;
  2525. d2 = dist2[2*j];
  2526. ++nhave;
  2527. }
  2528. ++n;
  2529. }
  2530. num_neighbors += n;
  2531. }
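// The pass above only counts how many neighbours each off-grid point needs, so
// the flat neighbour list can be allocated in one block: num_neighbors index
// entries plus one count slot per off-grid point.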
  2532. //printf("%s: %d neighbours in total\n", __func__, num_neighbors);
  2533. kneighbors_q2xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t));
  2534. iq2_data[gindex].neighbours = kneighbors_q2xs;
  2535. int counter = 0;
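// Second pass: fill the neighbour list. Off-grid map entries are set to
// -(counter + 1), so that -kmap_q2xs[i] - 1 is the offset of this point's run
// ([count, neighbour indices...]) inside kneighbors_q2xs.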
  2536. for (int i = 0; i < kmap_size; ++i) {
  2537. if (kmap_q2xs[i] >= 0) continue;
  2538. for (int k = 0; k < 8; ++k) {
  2539. int l = (i >> 2*k) & 0x3;
  2540. pos[k] = 2*l + 1;
  2541. }
  2542. for (int j = 0; j < grid_size; ++j) {
  2543. const int8_t * pg = (const int8_t *)(kgrid_q2xs + j);
  2544. int d2 = 0;
  2545. for (int k = 0; k < 8; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
  2546. dist2[2*j+0] = d2;
  2547. dist2[2*j+1] = j;
  2548. }
  2549. qsort(dist2, grid_size, 2*sizeof(int), iq2_compare_func);
  2550. kmap_q2xs[i] = -(counter + 1);
  2551. int d2 = dist2[0];
  2552. uint16_t * start = &kneighbors_q2xs[counter++];
  2553. int n = 0, nhave = 1;
  2554. for (int j = 0; j < grid_size; ++j) {
  2555. if (dist2[2*j] > d2) {
  2556. if (nhave == nwant) break;
  2557. d2 = dist2[2*j];
  2558. ++nhave;
  2559. }
  2560. kneighbors_q2xs[counter++] = dist2[2*j+1];
  2561. ++n;
  2562. }
  2563. *start = n;
  2564. }
  2565. free(dist2);
  2566. }
  2567. void iq2xs_free_impl(enum ggml_type type) {
  2568. GGML_ASSERT(type == GGML_TYPE_IQ2_XXS || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ1_S || type == GGML_TYPE_IQ1_M || type == GGML_TYPE_IQ2_S);
  2569. const int gindex = iq2_data_index(type);
  2570. if (iq2_data[gindex].grid) {
  2571. free(iq2_data[gindex].grid); iq2_data[gindex].grid = NULL;
  2572. free(iq2_data[gindex].map); iq2_data[gindex].map = NULL;
  2573. free(iq2_data[gindex].neighbours); iq2_data[gindex].neighbours = NULL;
  2574. }
  2575. }
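// Among the precomputed neighbours of an off-grid point, pick the grid point
// that minimizes the weighted squared error against xval at the given scale,
// write its quant values ((pg[i] - 1)/2, i.e. 0..3) into L, and return the
// chosen grid index.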
  2576. static int iq2_find_best_neighbour(const uint16_t * restrict neighbours, const uint64_t * restrict grid,
  2577. const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) {
  2578. int num_neighbors = neighbours[0];
  2579. GGML_ASSERT(num_neighbors > 0);
  2580. float best_d2 = FLT_MAX;
  2581. int grid_index = -1;
  2582. for (int j = 1; j <= num_neighbors; ++j) {
  2583. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  2584. float d2 = 0;
  2585. for (int i = 0; i < 8; ++i) {
  2586. float q = pg[i];
  2587. float diff = scale*q - xval[i];
  2588. d2 += weight[i]*diff*diff;
  2589. }
  2590. if (d2 < best_d2) {
  2591. best_d2 = d2; grid_index = neighbours[j];
  2592. }
  2593. }
  2594. GGML_ASSERT(grid_index >= 0);
  2595. const int8_t * pg = (const int8_t *)(grid + grid_index);
  2596. for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2;
  2597. return grid_index;
  2598. }
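// Quantize one row to IQ2_XXS. For every 32-value sub-block the code works on
// absolute values and records the flipped signs (the sign parity of each group
// of 8 is forced even by flipping the least important value, so 7 stored bits
// recover all 8 signs), sweeps a small range of candidate scales around an
// initial estimate from make_qp_quants, snaps each group of 8 quants onto the
// 256-entry codebook (falling back to the precomputed nearest neighbours when
// off-grid), and keeps the scale with the best weighted fit. Storage per
// sub-block: q2[2*ib+0] packs four 8-bit grid indices, q2[2*ib+1] packs four
// 7-bit sign masks in its low 28 bits and the 4-bit sub-block scale (relative
// to the fp16 super-block scale d = max_scale/31) in bits 28..31.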
  2599. static void quantize_row_iq2_xxs_impl(const float * restrict x, void * restrict vy, int64_t n, const float * restrict quant_weights) {
  2600. const int gindex = iq2_data_index(GGML_TYPE_IQ2_XXS);
  2601. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  2602. const int * kmap_q2xs = iq2_data[gindex].map;
  2603. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  2604. GGML_ASSERT(quant_weights && "missing quantization weights");
  2605. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  2606. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  2607. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  2608. GGML_ASSERT(n%QK_K == 0);
  2609. const int kMaxQ = 3;
  2610. const int64_t nbl = n/QK_K;
  2611. block_iq2_xxs * y = vy;
  2612. float scales[QK_K/32];
  2613. float weight[32];
  2614. float xval[32];
  2615. int8_t L[32];
  2616. int8_t Laux[32];
  2617. float waux[32];
  2618. uint8_t block_signs[4];
  2619. uint32_t q2[2*(QK_K/32)];
  2620. for (int ibl = 0; ibl < nbl; ++ibl) {
  2621. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  2622. memset(q2, 0, QK_K/4);
  2623. float max_scale = 0;
  2624. const float * xbl = x + QK_K*ibl;
  2625. float sumx2 = 0;
  2626. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  2627. float sigma2 = sumx2/QK_K;
  2628. for (int ib = 0; ib < QK_K/32; ++ib) {
  2629. const float * xb = xbl + 32*ib;
  2630. const float * qw = quant_weights + QK_K*ibl + 32*ib;
  2631. for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  2632. for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]);
  2633. for (int k = 0; k < 4; ++k) {
  2634. int nflip = 0;
  2635. uint8_t s = 0;
  2636. for (int i = 0; i < 8; ++i) {
  2637. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  2638. else {
  2639. xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
  2640. }
  2641. }
  2642. if (nflip%2) {
  2643. int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
  2644. for (int i = 1; i < 8; ++i) {
  2645. float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
  2646. if (ax < min) {
  2647. min = ax; imin = i;
  2648. }
  2649. }
  2650. xval[8*k+imin] = -xval[8*k+imin];
  2651. s ^= (1 << imin);
  2652. }
  2653. block_signs[k] = s & 127;
  2654. }
  2655. float max = xval[0];
  2656. for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]);
  2657. if (max < GROUP_MAX_EPS) {
  2658. scales[ib] = 0;
  2659. memset(L, 0, 32);
  2660. continue;
  2661. }
  2662. float scale = make_qp_quants(32, kMaxQ+1, xval, (uint8_t*)L, weight);
  2663. float eff_max = scale*kMaxQ;
  2664. float best = 0;
  2665. for (int is = -6; is <= 6; ++is) {
  2666. float id = (2*kMaxQ-1+is*0.1f)/eff_max;
  2667. float this_scale = 1/id;
  2668. for (int k = 0; k < 4; ++k) {
  2669. for (int i = 0; i < 8; ++i) {
  2670. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  2671. Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
  2672. }
  2673. uint16_t u = 0;
  2674. for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
  2675. int grid_index = kmap_q2xs[u];
  2676. if (grid_index < 0) {
  2677. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  2678. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
  2679. }
  2680. }
  2681. float sumqx = 0, sumq2 = 0;
  2682. for (int i = 0; i < 32; ++i) {
  2683. float w = weight[i];
  2684. float q = 2*Laux[i] + 1;
  2685. sumqx += w*xval[i]*q;
  2686. sumq2 += w*q*q;
  2687. }
  2688. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  2689. scale = sumqx/sumq2; best = scale*sumqx;
  2690. memcpy(L, Laux, 32);
  2691. }
  2692. }
  2693. if (scale > 0) {
  2694. float id = 1/scale;
  2695. for (int k = 0; k < 4; ++k) {
  2696. uint16_t u = 0;
  2697. for (int i = 0; i < 8; ++i) {
  2698. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  2699. l = MAX(0, MIN(kMaxQ-1, l));
  2700. u |= (l << 2*i);
  2701. }
  2702. int grid_index = kmap_q2xs[u];
  2703. if (grid_index < 0) {
  2704. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  2705. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
  2706. }
  2707. const int8_t * pg = (const int8_t *)(kgrid_q2xs + grid_index);
  2708. for (int i = 0; i < 8; ++i) L[8*k+i] = (pg[i] - 1)/2;
  2709. }
  2710. float sumqx = 0, sumq2 = 0;
  2711. for (int i = 0; i < 32; ++i) {
  2712. float w = weight[i];
  2713. float q = 2*L[i] + 1;
  2714. sumqx += w*xval[i]*q;
  2715. sumq2 += w*q*q;
  2716. }
  2717. if (sumq2 > 0) scale = sumqx/sumq2;
  2718. }
  2719. if (scale < 0) {
  2720. // This should never happen, but just in case, flip the scale so that it is positive (we use uints to encode the scale)
  2721. // and correspondingly flip quant signs.
  2722. scale = -scale;
  2723. for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127;
  2724. }
  2725. for (int k = 0; k < 4; ++k) {
  2726. uint16_t u = 0;
  2727. for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
  2728. int grid_index = kmap_q2xs[u];
  2729. if (grid_index < 0) {
  2730. printf("Oops: found point %u not on grid:", u);
  2731. for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
  2732. printf("\n");
  2733. GGML_ABORT("fatal error");
  2734. }
  2735. q2[2*ib+0] |= ((uint32_t) grid_index << 8*k);
  2736. q2[2*ib+1] |= (block_signs[k] << 7*k);
  2737. }
  2738. GGML_ASSERT(scale >= 0);
  2739. scales[ib] = scale;
  2740. max_scale = MAX(max_scale, scale);
  2741. }
  2742. if (!max_scale) {
  2743. memset(y[ibl].qs, 0, QK_K/4);
  2744. continue;
  2745. }
  2746. float d = max_scale/31;
  2747. y[ibl].d = GGML_FP32_TO_FP16(d);
  2748. float id = 1/d;
  2749. for (int ib = 0; ib < QK_K/32; ++ib) {
  2750. int l = nearest_int(0.5f*(id*scales[ib]-1));
  2751. l = MAX(0, MIN(15, l));
  2752. q2[2*ib+1] |= ((uint32_t)l << 28);
  2753. }
  2754. memcpy(y[ibl].qs, q2, QK_K/4);
  2755. }
  2756. }
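// IQ2_XS differs from IQ2_XXS above mainly in that it works on 16-value
// sub-blocks (two groups of 8), uses the 512-entry codebook, and stores each
// group as one uint16: the 9-bit grid index in the low bits and the 7-bit sign
// mask in bits 9..15. The 4-bit sub-block scales go into y[ibl].scales as
// nibbles instead of being packed next to the signs.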
  2757. static void quantize_row_iq2_xs_impl(const float * restrict x, void * restrict vy, int64_t n, const float * restrict quant_weights) {
  2758. const int gindex = iq2_data_index(GGML_TYPE_IQ2_XS);
  2759. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  2760. const int * kmap_q2xs = iq2_data[gindex].map;
  2761. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  2762. GGML_ASSERT(quant_weights && "missing quantization weights");
  2763. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  2764. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  2765. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  2766. GGML_ASSERT(n%QK_K == 0);
  2767. const int kMaxQ = 3;
  2768. const int64_t nbl = n/QK_K;
  2769. block_iq2_xs * y = vy;
  2770. float scales[QK_K/16];
  2771. float weight[16];
  2772. float xval[16];
  2773. int8_t L[16];
  2774. int8_t Laux[16];
  2775. float waux[16];
  2776. bool is_on_grid[2];
  2777. bool is_on_grid_aux[2];
  2778. uint8_t block_signs[2];
  2779. uint16_t q2[2*(QK_K/16)];
  2780. for (int ibl = 0; ibl < nbl; ++ibl) {
  2781. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  2782. memset(q2, 0, QK_K/4);
  2783. memset(y[ibl].scales, 0, QK_K/32);
  2784. float max_scale = 0;
  2785. const float * xbl = x + QK_K*ibl;
  2786. float sumx2 = 0;
  2787. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  2788. float sigma2 = sumx2/QK_K;
  2789. for (int ib = 0; ib < QK_K/16; ++ib) {
  2790. const float * xb = xbl + 16*ib;
  2791. const float * qw = quant_weights + QK_K*ibl + 16*ib;
  2792. for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  2793. for (int i = 0; i < 16; ++i) waux[i] = sqrtf(weight[i]);
  2794. for (int k = 0; k < 2; ++k) {
  2795. int nflip = 0;
  2796. uint8_t s = 0;
  2797. for (int i = 0; i < 8; ++i) {
  2798. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  2799. else {
  2800. xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
  2801. }
  2802. }
  2803. if (nflip%2) {
  2804. int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
  2805. for (int i = 1; i < 8; ++i) {
  2806. float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
  2807. if (ax < min) {
  2808. min = ax; imin = i;
  2809. }
  2810. }
  2811. xval[8*k+imin] = -xval[8*k+imin];
  2812. s ^= (1 << imin);
  2813. }
  2814. block_signs[k] = s & 127;
  2815. }
  2816. float max = xval[0];
  2817. for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]);
  2818. if (max < GROUP_MAX_EPS) {
  2819. scales[ib] = 0;
  2820. memset(L, 0, 16);
  2821. continue;
  2822. }
  2823. float best = 0;
  2824. float scale = max/(2*kMaxQ-1);
  2825. is_on_grid[0] = is_on_grid[1] = true;
  2826. for (int is = -9; is <= 9; ++is) {
  2827. float id = (2*kMaxQ-1+is*0.1f)/max;
  2828. float this_scale = 1/id;
  2829. for (int k = 0; k < 2; ++k) {
  2830. for (int i = 0; i < 8; ++i) {
  2831. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  2832. Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
  2833. }
  2834. uint16_t u = 0;
  2835. for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
  2836. int grid_index = kmap_q2xs[u];
  2837. is_on_grid_aux[k] = true;
  2838. if (grid_index < 0) {
  2839. is_on_grid_aux[k] = false;
  2840. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  2841. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
  2842. }
  2843. }
  2844. float sumqx = 0, sumq2 = 0;
  2845. for (int i = 0; i < 16; ++i) {
  2846. float w = weight[i];
  2847. float q = 2*Laux[i] + 1;
  2848. sumqx += w*xval[i]*q;
  2849. sumq2 += w*q*q;
  2850. }
  2851. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  2852. scale = sumqx/sumq2; best = scale*sumqx;
  2853. for (int i = 0; i < 16; ++i) L[i] = Laux[i];
  2854. for (int k = 0; k < 2; ++k) is_on_grid[k] = is_on_grid_aux[k];
  2855. }
  2856. }
  2857. int n_not_ongrid = 0;
  2858. for (int k = 0; k < 2; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
  2859. if (n_not_ongrid > 0 && scale > 0) {
  2860. float id = 1/scale;
  2861. for (int k = 0; k < 2; ++k) {
  2862. if (is_on_grid[k]) continue;
  2863. uint16_t u = 0;
  2864. for (int i = 0; i < 8; ++i) {
  2865. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  2866. l = MAX(0, MIN(kMaxQ-1, l));
  2867. u |= (l << 2*i);
  2868. L[8*k + i] = l;
  2869. }
  2870. int grid_index = kmap_q2xs[u];
  2871. if (grid_index < 0) {
  2872. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  2873. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
  2874. }
  2875. }
  2876. float sumqx = 0, sumq2 = 0;
  2877. for (int i = 0; i < 16; ++i) {
  2878. float w = weight[i];
  2879. float q = 2*L[i] + 1;
  2880. sumqx += w*xval[i]*q;
  2881. sumq2 += w*q*q;
  2882. }
  2883. if (sumq2 > 0) scale = sumqx/sumq2;
  2884. }
  2885. if (scale < 0) {
  2886. scale = -scale;
  2887. for (int k = 0; k < 2; ++k) block_signs[k] = (~block_signs[k]) & 127;
  2888. }
  2889. for (int k = 0; k < 2; ++k) {
  2890. uint16_t u = 0;
  2891. for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
  2892. int grid_index = kmap_q2xs[u];
  2893. if (grid_index < 0) {
  2894. printf("Oops: found point %u not on grid:", u);
  2895. for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
  2896. printf("\n");
  2897. GGML_ABORT("fatal error");
  2898. }
  2899. q2[2*ib+k] = grid_index | (block_signs[k] << 9);
  2900. }
  2901. GGML_ASSERT(scale >= 0);
  2902. scales[ib] = scale;
  2903. max_scale = MAX(max_scale, scale);
  2904. }
  2905. if (!max_scale) {
  2906. memset(y[ibl].qs, 0, QK_K/4);
  2907. continue;
  2908. }
  2909. float d = max_scale/31;
  2910. y[ibl].d = GGML_FP32_TO_FP16(d);
  2911. float id = 1/d;
  2912. for (int ib = 0; ib < QK_K/16; ++ib) {
  2913. int l = nearest_int(0.5f*(id*scales[ib]-1));
  2914. l = MAX(0, MIN(15, l));
  2915. if (ib%2 == 0) y[ibl].scales[ib/2] = l;
  2916. else y[ibl].scales[ib/2] |= (l << 4);
  2917. }
  2918. memcpy(y[ibl].qs, q2, QK_K/4);
  2919. }
  2920. }
  2921. size_t quantize_iq2_xxs(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  2922. GGML_ASSERT(n_per_row%QK_K == 0);
  2923. int64_t nblock = n_per_row/QK_K;
  2924. char * qrow = (char *)dst;
  2925. for (int64_t row = 0; row < nrow; ++row) {
  2926. quantize_row_iq2_xxs_impl(src, qrow, n_per_row, quant_weights);
  2927. src += n_per_row;
  2928. qrow += nblock*sizeof(block_iq2_xxs);
  2929. }
  2930. return nrow * nblock * sizeof(block_iq2_xxs);
  2931. }
  2932. size_t quantize_iq2_xs(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  2933. GGML_ASSERT(n_per_row%QK_K == 0);
  2934. int64_t nblock = n_per_row/QK_K;
  2935. char * qrow = (char *)dst;
  2936. for (int64_t row = 0; row < nrow; ++row) {
  2937. quantize_row_iq2_xs_impl(src, qrow, n_per_row, quant_weights);
  2938. src += n_per_row;
  2939. qrow += nblock*sizeof(block_iq2_xs);
  2940. }
  2941. return nrow * nblock * sizeof(block_iq2_xs);
  2942. }
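// Minimal usage sketch (for illustration only; it assumes the public
// ggml_quantize_init()/ggml_quantize_free()/ggml_quantize_chunk() entry points
// declared in ggml.h, and 'imatrix' is a hypothetical array of n_per_row
// per-column importance weights -- required to be non-NULL for IQ2_XS):
//
//   ggml_quantize_init(GGML_TYPE_IQ2_XS);   // builds the grid/map/neighbour tables
//   size_t written = quantize_iq2_xs(src, dst, nrow, n_per_row, imatrix);
//   ggml_quantize_free();                   // releases the tables
//
// In practice callers typically go through ggml_quantize_chunk(), which takes
// care of initialization and dispatch.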
  2943. //
  2944. // ============================================= 3-bit using D4 lattice
  2945. //
  2946. typedef struct {
  2947. uint32_t * grid;
  2948. int * map;
  2949. uint16_t * neighbours;
  2950. } iq3_entry_t;
  2951. static iq3_entry_t iq3_data[2] = {
  2952. {NULL, NULL, NULL},
  2953. {NULL, NULL, NULL},
  2954. };
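// iq3_data[0] holds the 256-point grid used by IQ3_XXS, iq3_data[1] the
// 512-point grid used by IQ3_S (see iq3_data_index below).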
  2955. static inline int iq3_data_index(int grid_size) {
  2956. (void)grid_size;
  2957. GGML_ASSERT(grid_size == 256 || grid_size == 512);
  2958. return grid_size == 256 ? 0 : 1;
  2959. }
  2960. static int iq3_compare_func(const void * left, const void * right) {
  2961. const int * l = (const int *)left;
  2962. const int * r = (const int *)right;
  2963. return l[0] < r[0] ? -1 : l[0] > r[0] ? 1 : l[1] < r[1] ? -1 : l[1] > r[1] ? 1 : 0;
  2964. }
  2965. void iq3xs_init_impl(int grid_size) {
  2966. const int gindex = iq3_data_index(grid_size);
  2967. if (iq3_data[gindex].grid) {
  2968. return;
  2969. }
  2970. static const uint16_t kgrid_256[256] = {
  2971. 0, 2, 4, 9, 11, 15, 16, 18, 25, 34, 59, 61, 65, 67, 72, 74,
  2972. 81, 85, 88, 90, 97, 108, 120, 128, 130, 132, 137, 144, 146, 153, 155, 159,
  2973. 169, 175, 189, 193, 199, 200, 202, 213, 248, 267, 287, 292, 303, 315, 317, 321,
  2974. 327, 346, 362, 413, 436, 456, 460, 462, 483, 497, 513, 515, 520, 522, 529, 531,
  2975. 536, 538, 540, 551, 552, 576, 578, 585, 592, 594, 641, 643, 648, 650, 657, 664,
  2976. 698, 704, 706, 720, 729, 742, 758, 769, 773, 808, 848, 852, 870, 889, 901, 978,
  2977. 992, 1024, 1026, 1033, 1035, 1040, 1042, 1046, 1049, 1058, 1089, 1091, 1093, 1096, 1098, 1105,
  2978. 1112, 1139, 1143, 1144, 1152, 1154, 1161, 1167, 1168, 1170, 1183, 1184, 1197, 1217, 1224, 1228,
  2979. 1272, 1276, 1309, 1323, 1347, 1367, 1377, 1404, 1473, 1475, 1486, 1509, 1537, 1544, 1546, 1553,
  2980. 1555, 1576, 1589, 1594, 1600, 1602, 1616, 1625, 1636, 1638, 1665, 1667, 1672, 1685, 1706, 1722,
  2981. 1737, 1755, 1816, 1831, 1850, 1856, 1862, 1874, 1901, 1932, 1950, 1971, 2011, 2032, 2052, 2063,
  2982. 2077, 2079, 2091, 2095, 2172, 2192, 2207, 2208, 2224, 2230, 2247, 2277, 2308, 2345, 2356, 2389,
  2983. 2403, 2424, 2501, 2504, 2506, 2520, 2570, 2593, 2616, 2624, 2630, 2646, 2669, 2700, 2714, 2746,
  2984. 2754, 2795, 2824, 2835, 2839, 2874, 2882, 2905, 2984, 3028, 3042, 3092, 3108, 3110, 3124, 3153,
  2985. 3185, 3215, 3252, 3288, 3294, 3364, 3397, 3434, 3483, 3523, 3537, 3587, 3589, 3591, 3592, 3610,
  2986. 3626, 3670, 3680, 3722, 3749, 3754, 3776, 3789, 3803, 3824, 3857, 3873, 3904, 3906, 3924, 3992,
  2987. };
  2988. static const uint16_t kgrid_512[512] = {
  2989. 0, 1, 2, 5, 7, 8, 9, 10, 12, 14, 16, 17, 21, 27, 32, 34,
  2990. 37, 39, 41, 43, 48, 50, 57, 60, 63, 64, 65, 66, 68, 72, 73, 77,
  2991. 80, 83, 87, 89, 93, 100, 113, 117, 122, 128, 129, 133, 135, 136, 139, 142,
  2992. 145, 149, 152, 156, 162, 165, 167, 169, 171, 184, 187, 195, 201, 205, 208, 210,
  2993. 217, 219, 222, 228, 232, 234, 247, 249, 253, 256, 267, 271, 273, 276, 282, 288,
  2994. 291, 297, 312, 322, 324, 336, 338, 342, 347, 353, 357, 359, 374, 379, 390, 393,
  2995. 395, 409, 426, 441, 448, 450, 452, 464, 466, 470, 475, 488, 492, 512, 513, 514,
  2996. 516, 520, 521, 523, 525, 527, 528, 530, 537, 540, 542, 556, 558, 561, 570, 576,
  2997. 577, 579, 582, 584, 588, 593, 600, 603, 609, 616, 618, 632, 638, 640, 650, 653,
  2998. 655, 656, 660, 666, 672, 675, 685, 688, 698, 705, 708, 711, 712, 715, 721, 727,
  2999. 728, 732, 737, 754, 760, 771, 773, 778, 780, 793, 795, 802, 806, 808, 812, 833,
  3000. 840, 843, 849, 856, 858, 873, 912, 916, 919, 932, 934, 961, 963, 968, 970, 977,
  3001. 989, 993, 1010, 1016, 1024, 1025, 1027, 1029, 1031, 1032, 1034, 1036, 1038, 1041, 1043, 1047,
  3002. 1048, 1050, 1057, 1059, 1061, 1064, 1066, 1079, 1080, 1083, 1085, 1088, 1090, 1096, 1099, 1103,
  3003. 1106, 1109, 1113, 1116, 1122, 1129, 1153, 1156, 1159, 1169, 1171, 1176, 1183, 1185, 1195, 1199,
  3004. 1209, 1212, 1216, 1218, 1221, 1225, 1234, 1236, 1241, 1243, 1250, 1256, 1270, 1281, 1287, 1296,
  3005. 1299, 1306, 1309, 1313, 1338, 1341, 1348, 1353, 1362, 1375, 1376, 1387, 1400, 1408, 1410, 1415,
  3006. 1425, 1453, 1457, 1477, 1481, 1494, 1496, 1507, 1512, 1538, 1545, 1547, 1549, 1551, 1554, 1561,
  3007. 1563, 1565, 1570, 1572, 1575, 1577, 1587, 1593, 1601, 1603, 1605, 1612, 1617, 1619, 1632, 1648,
  3008. 1658, 1662, 1664, 1674, 1680, 1690, 1692, 1704, 1729, 1736, 1740, 1745, 1747, 1751, 1752, 1761,
  3009. 1763, 1767, 1773, 1787, 1795, 1801, 1806, 1810, 1817, 1834, 1840, 1844, 1857, 1864, 1866, 1877,
  3010. 1882, 1892, 1902, 1915, 1934, 1953, 1985, 1987, 2000, 2002, 2013, 2048, 2052, 2058, 2064, 2068,
  3011. 2071, 2074, 2081, 2088, 2104, 2114, 2119, 2121, 2123, 2130, 2136, 2141, 2147, 2153, 2157, 2177,
  3012. 2179, 2184, 2189, 2193, 2203, 2208, 2223, 2226, 2232, 2244, 2249, 2251, 2256, 2258, 2265, 2269,
  3013. 2304, 2306, 2324, 2335, 2336, 2361, 2373, 2375, 2385, 2418, 2443, 2460, 2480, 2504, 2509, 2520,
  3014. 2531, 2537, 2562, 2568, 2572, 2578, 2592, 2596, 2599, 2602, 2614, 2620, 2625, 2627, 2629, 2634,
  3015. 2641, 2650, 2682, 2688, 2697, 2707, 2712, 2718, 2731, 2754, 2759, 2760, 2775, 2788, 2793, 2805,
  3016. 2811, 2817, 2820, 2832, 2842, 2854, 2890, 2902, 2921, 2923, 2978, 3010, 3012, 3026, 3081, 3083,
  3017. 3085, 3097, 3099, 3120, 3136, 3152, 3159, 3188, 3210, 3228, 3234, 3245, 3250, 3256, 3264, 3276,
  3018. 3281, 3296, 3349, 3363, 3378, 3392, 3395, 3420, 3440, 3461, 3488, 3529, 3531, 3584, 3588, 3591,
  3019. 3600, 3602, 3614, 3616, 3628, 3634, 3650, 3657, 3668, 3683, 3685, 3713, 3716, 3720, 3726, 3729,
  3020. 3736, 3753, 3778, 3802, 3805, 3819, 3841, 3845, 3851, 3856, 3880, 3922, 3938, 3970, 3993, 4032,
  3021. };
  3022. const int kmap_size = 4096;
  3023. const int nwant = grid_size == 256 ? 2 : 3;
  3024. const uint16_t * kgrid = grid_size == 256 ? kgrid_256 : kgrid_512;
  3025. uint32_t * kgrid_q3xs;
  3026. int * kmap_q3xs;
  3027. uint16_t * kneighbors_q3xs;
  3028. //printf("================================================================= %s(grid_size = %d)\n", __func__, grid_size);
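// Unpack each 16-bit grid entry into 4 coordinates: every 3-bit field l is
// mapped to the odd value 2*l + 1, i.e. {1, 3, ..., 15}, so one grid point
// fits in a single uint32_t (one int8_t per coordinate).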
  3029. uint32_t * the_grid = (uint32_t *)malloc(grid_size*sizeof(uint32_t));
  3030. for (int k = 0; k < grid_size; ++k) {
  3031. int8_t * pos = (int8_t *)(the_grid + k);
  3032. for (int i = 0; i < 4; ++i) {
  3033. int l = (kgrid[k] >> 3*i) & 0x7;
  3034. pos[i] = 2*l + 1;
  3035. }
  3036. }
  3037. kgrid_q3xs = the_grid;
  3038. iq3_data[gindex].grid = the_grid;
  3039. kmap_q3xs = (int *)malloc(kmap_size*sizeof(int));
  3040. iq3_data[gindex].map = kmap_q3xs;
  3041. for (int i = 0; i < kmap_size; ++i) kmap_q3xs[i] = -1;
  3042. uint32_t aux32;
  3043. uint8_t * aux8 = (uint8_t *)&aux32;
  3044. for (int i = 0; i < grid_size; ++i) {
  3045. aux32 = kgrid_q3xs[i];
  3046. uint16_t index = 0;
  3047. for (int k=0; k<4; ++k) {
  3048. uint16_t q = (aux8[k] - 1)/2;
  3049. index |= (q << 3*k);
  3050. }
  3051. kmap_q3xs[index] = i;
  3052. }
  3053. int8_t pos[4];
  3054. int * dist2 = (int *)malloc(2*grid_size*sizeof(int));
  3055. int num_neighbors = 0, num_not_in_map = 0;
  3056. for (int i = 0; i < kmap_size; ++i) {
  3057. if (kmap_q3xs[i] >= 0) continue;
  3058. ++num_not_in_map;
  3059. for (int k = 0; k < 4; ++k) {
  3060. int l = (i >> 3*k) & 0x7;
  3061. pos[k] = 2*l + 1;
  3062. }
  3063. for (int j = 0; j < grid_size; ++j) {
  3064. const int8_t * pg = (const int8_t *)(kgrid_q3xs + j);
  3065. int d2 = 0;
  3066. for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
  3067. dist2[2*j+0] = d2;
  3068. dist2[2*j+1] = j;
  3069. }
  3070. qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func);
  3071. int n = 0; int d2 = dist2[0];
  3072. int nhave = 1;
  3073. for (int j = 0; j < grid_size; ++j) {
  3074. if (dist2[2*j] > d2) {
  3075. if (nhave == nwant) break;
  3076. d2 = dist2[2*j];
  3077. ++nhave;
  3078. }
  3079. ++n;
  3080. }
  3081. num_neighbors += n;
  3082. }
  3083. //printf("%s: %d neighbours in total\n", __func__, num_neighbors);
  3084. kneighbors_q3xs = (uint16_t *)malloc((num_neighbors + num_not_in_map)*sizeof(uint16_t));
  3085. iq3_data[gindex].neighbours = kneighbors_q3xs;
  3086. int counter = 0;
  3087. for (int i = 0; i < kmap_size; ++i) {
  3088. if (kmap_q3xs[i] >= 0) continue;
  3089. for (int k = 0; k < 4; ++k) {
  3090. int l = (i >> 3*k) & 0x7;
  3091. pos[k] = 2*l + 1;
  3092. }
  3093. for (int j = 0; j < grid_size; ++j) {
  3094. const int8_t * pg = (const int8_t *)(kgrid_q3xs + j);
  3095. int d2 = 0;
  3096. for (int k = 0; k < 4; ++k) d2 += (pg[k] - pos[k])*(pg[k] - pos[k]);
  3097. dist2[2*j+0] = d2;
  3098. dist2[2*j+1] = j;
  3099. }
  3100. qsort(dist2, grid_size, 2*sizeof(int), iq3_compare_func);
  3101. kmap_q3xs[i] = -(counter + 1);
  3102. int d2 = dist2[0];
  3103. uint16_t * start = &kneighbors_q3xs[counter++];
  3104. int n = 0, nhave = 1;
  3105. for (int j = 0; j < grid_size; ++j) {
  3106. if (dist2[2*j] > d2) {
  3107. if (nhave == nwant) break;
  3108. d2 = dist2[2*j];
  3109. ++nhave;
  3110. }
  3111. kneighbors_q3xs[counter++] = dist2[2*j+1];
  3112. ++n;
  3113. }
  3114. *start = n;
  3115. }
  3116. free(dist2);
  3117. }
  3118. void iq3xs_free_impl(int grid_size) {
  3119. GGML_ASSERT(grid_size == 256 || grid_size == 512);
  3120. const int gindex = iq3_data_index(grid_size);
  3121. if (iq3_data[gindex].grid) {
  3122. free(iq3_data[gindex].grid); iq3_data[gindex].grid = NULL;
  3123. free(iq3_data[gindex].map); iq3_data[gindex].map = NULL;
  3124. free(iq3_data[gindex].neighbours); iq3_data[gindex].neighbours = NULL;
  3125. }
  3126. }
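// Same role as iq2_find_best_neighbour above, but for 4-coordinate grid points
// with 3-bit quant values (0..7).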
  3127. static int iq3_find_best_neighbour(const uint16_t * restrict neighbours, const uint32_t * restrict grid,
  3128. const float * restrict xval, const float * restrict weight, float scale, int8_t * restrict L) {
  3129. int num_neighbors = neighbours[0];
  3130. GGML_ASSERT(num_neighbors > 0);
  3131. float best_d2 = FLT_MAX;
  3132. int grid_index = -1;
  3133. for (int j = 1; j <= num_neighbors; ++j) {
  3134. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  3135. float d2 = 0;
  3136. for (int i = 0; i < 4; ++i) {
  3137. float q = pg[i];
  3138. float diff = scale*q - xval[i];
  3139. d2 += weight[i]*diff*diff;
  3140. }
  3141. if (d2 < best_d2) {
  3142. best_d2 = d2; grid_index = neighbours[j];
  3143. }
  3144. }
  3145. GGML_ASSERT(grid_index >= 0);
  3146. const int8_t * pg = (const int8_t *)(grid + grid_index);
  3147. for (int i = 0; i < 4; ++i) L[i] = (pg[i] - 1)/2;
  3148. return grid_index;
  3149. }
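// Shared implementation for the 3-bit row quantizers: with grid_size == 256 it
// writes block_iq3_xxs, with grid_size == 512 it writes the block_iq3_s layout
// (8-bit grid index plus a high bit collected in qh). Each 32-value sub-block
// follows the same sign-parity, scale-sweep and grid-snapping scheme as the
// 2-bit quantizers, but on groups of 4 values with 3-bit quants; the four
// 7-bit sign masks and the 4-bit sub-block scale are packed into one uint32 in
// scales_and_signs[].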
  3150. static void quantize_row_iq3_xxs_impl(int grid_size, const float * restrict x, void * restrict vy, int64_t n,
  3151. const float * restrict quant_weights) {
  3152. const int gindex = iq3_data_index(grid_size);
  3153. const uint32_t * kgrid_q3xs = iq3_data[gindex].grid;
  3154. const int * kmap_q3xs = iq3_data[gindex].map;
  3155. const uint16_t * kneighbors_q3xs = iq3_data[gindex].neighbours;
  3156. //GGML_ASSERT(quant_weights && "missing quantization weights");
  3157. GGML_ASSERT(kgrid_q3xs && "forgot to call ggml_quantize_init()?");
  3158. GGML_ASSERT(kmap_q3xs && "forgot to call ggml_quantize_init()?");
  3159. GGML_ASSERT(kneighbors_q3xs && "forgot to call ggml_quantize_init()?");
  3160. GGML_ASSERT(n%QK_K == 0);
  3161. const int kMaxQ = 8;
  3162. const int64_t nbl = n/QK_K;
  3163. ggml_fp16_t * dh;
  3164. uint8_t * qs;
  3165. int block_size;
  3166. if (grid_size == 256) {
  3167. block_iq3_xxs * y = vy;
  3168. dh = &y->d;
  3169. qs = y->qs;
  3170. block_size = sizeof(block_iq3_xxs);
  3171. } else {
  3172. block_iq3_s * y = vy;
  3173. dh = &y->d;
  3174. qs = y->qs;
  3175. block_size = sizeof(block_iq3_s);
  3176. }
  3177. int quant_size = block_size - sizeof(ggml_fp16_t);
  3178. float scales[QK_K/32];
  3179. float weight[32];
  3180. float xval[32];
  3181. int8_t L[32];
  3182. int8_t Laux[32];
  3183. float waux[32];
  3184. bool is_on_grid[8];
  3185. bool is_on_grid_aux[8];
  3186. uint8_t block_signs[8];
  3187. uint8_t q3[3*(QK_K/8)+QK_K/32];
  3188. uint32_t * scales_and_signs = (uint32_t *)(q3 + QK_K/4);
  3189. uint8_t * qh = q3 + 3*(QK_K/8);
  3190. for (int ibl = 0; ibl < nbl; ++ibl) {
  3191. dh[0] = GGML_FP32_TO_FP16(0.f);
  3192. memset(q3, 0, 3*QK_K/8+QK_K/32);
  3193. float max_scale = 0;
  3194. const float * xbl = x + QK_K*ibl;
  3195. float sumx2 = 0;
  3196. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  3197. float sigma2 = 2*sumx2/QK_K;
  3198. for (int ib = 0; ib < QK_K/32; ++ib) {
  3199. const float * xb = xbl + 32*ib;
  3200. if (quant_weights) {
  3201. const float * qw = quant_weights + QK_K*ibl + 32*ib;
  3202. for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  3203. } else {
  3204. for (int i = 0; i < 32; ++i) weight[i] = xb[i]*xb[i];
  3205. }
  3206. for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]);
  3207. for (int k = 0; k < 4; ++k) {
  3208. int nflip = 0;
  3209. uint8_t s = 0;
  3210. for (int i = 0; i < 8; ++i) {
  3211. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  3212. else {
  3213. xval[8*k + i] = -xb[8*k + i]; ++nflip; s |= (1 << i);
  3214. }
  3215. }
  3216. if (nflip%2) {
  3217. int imin = 0; float min = weight[8*k+imin]*xb[8*k+imin]*xb[8*k+imin];
  3218. for (int i = 1; i < 8; ++i) {
  3219. float ax = weight[8*k+i]*xb[8*k+i]*xb[8*k+i];
  3220. if (ax < min) {
  3221. min = ax; imin = i;
  3222. }
  3223. }
  3224. xval[8*k+imin] = -xval[8*k+imin];
  3225. s ^= (1 << imin);
  3226. }
  3227. block_signs[k] = s & 127;
  3228. }
  3229. float max = xval[0];
  3230. for (int i = 1; i < 32; ++i) max = MAX(max, xval[i]);
  3231. if (max < GROUP_MAX_EPS_IQ3_XXS) {
  3232. scales[ib] = 0;
  3233. memset(L, 0, 32);
  3234. continue;
  3235. }
  3236. float best = 0;
  3237. float scale = max/(2*kMaxQ-1);
  3238. for (int is = -15; is <= 15; ++is) {
  3239. float id = (2*kMaxQ-1+is*0.2f)/max;
  3240. float this_scale = 1/id;
  3241. for (int k = 0; k < 8; ++k) {
  3242. for (int i = 0; i < 4; ++i) {
  3243. int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
  3244. Laux[4*k+i] = MAX(0, MIN(kMaxQ-1, l));
  3245. }
  3246. uint16_t u = 0;
  3247. for (int i = 0; i < 4; ++i) u |= (Laux[4*k+i] << 3*i);
  3248. int grid_index = kmap_q3xs[u];
  3249. is_on_grid_aux[k] = true;
  3250. if (grid_index < 0) {
  3251. is_on_grid_aux[k] = false;
  3252. const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
  3253. grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, this_scale, Laux + 4*k);
  3254. }
  3255. }
  3256. float sumqx = 0, sumq2 = 0;
  3257. for (int i = 0; i < 32; ++i) {
  3258. float w = weight[i];
  3259. float q = 2*Laux[i] + 1;
  3260. sumqx += w*xval[i]*q;
  3261. sumq2 += w*q*q;
  3262. }
  3263. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  3264. scale = sumqx/sumq2; best = scale*sumqx;
  3265. for (int i = 0; i < 32; ++i) L[i] = Laux[i];
  3266. for (int k = 0; k < 8; ++k) is_on_grid[k] = is_on_grid_aux[k];
  3267. }
  3268. }
  3269. int n_not_ongrid = 0;
  3270. for (int k = 0; k < 8; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
  3271. if (n_not_ongrid > 0 && scale > 0) {
  3272. float id = 1/scale;
  3273. for (int k = 0; k < 8; ++k) {
  3274. if (is_on_grid[k]) continue;
  3275. uint16_t u = 0;
  3276. for (int i = 0; i < 4; ++i) {
  3277. int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
  3278. l = MAX(0, MIN(kMaxQ-1, l));
  3279. u |= (l << 3*i);
  3280. }
  3281. int grid_index = kmap_q3xs[u];
  3282. if (grid_index < 0) {
  3283. const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
  3284. grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, scale, L + 4*k);
  3285. }
  3286. const int8_t * pg = (const int8_t *)(kgrid_q3xs + grid_index);
  3287. for (int i = 0; i < 4; ++i) L[4*k+i] = (pg[i] - 1)/2;
  3288. }
  3289. float sumqx = 0, sumq2 = 0;
  3290. for (int i = 0; i < 32; ++i) {
  3291. float w = weight[i];
  3292. float q = 2*L[i] + 1;
  3293. sumqx += w*xval[i]*q;
  3294. sumq2 += w*q*q;
  3295. }
  3296. if (sumq2 > 0) scale = sumqx/sumq2;
  3297. }
  3298. if (scale < 0) {
  3299. // This should never happen, but just in case, flip the scale so that it is positive (we use uints to encode the scale)
  3300. // and correspondingly flip quant signs.
  3301. scale = -scale;
  3302. for (int k = 0; k < 4; ++k) block_signs[k] = (~block_signs[k]) & 127;
  3303. }
  3304. for (int k = 0; k < 8; ++k) {
  3305. uint16_t u = 0;
  3306. for (int i = 0; i < 4; ++i) u |= (L[4*k+i] << 3*i);
  3307. int grid_index = kmap_q3xs[u];
  3308. if (grid_index < 0) {
  3309. printf("Oops: found point %u not on grid:", u);
  3310. for (int i = 0; i < 4; ++i) printf(" %d", L[4*k+i]);
  3311. printf("\n");
  3312. GGML_ABORT("fatal error");
  3313. }
  3314. if (grid_size == 256) {
  3315. q3[8*ib+k] = grid_index;
  3316. } else {
  3317. q3[8*ib+k] = grid_index & 255;
  3318. qh[ib] |= ((grid_index >> 8) << k);
  3319. }
  3320. }
  3321. scales_and_signs[ib] = block_signs[0] | (block_signs[1] << 7) | (block_signs[2] << 14) | (block_signs[3] << 21);
  3322. GGML_ASSERT(scale >= 0);
  3323. scales[ib] = scale;
  3324. max_scale = MAX(max_scale, scale);
  3325. }
  3326. if (!max_scale) {
  3327. memset(qs, 0, quant_size);
  3328. dh += block_size/sizeof(ggml_fp16_t);
  3329. qs += block_size;
  3330. continue;
  3331. }
  3332. float d = max_scale/31;
  3333. dh[0] = GGML_FP32_TO_FP16(d * 1.0125f); // small improvement via this fudge factor
  3334. float id = 1/d;
  3335. for (int ib = 0; ib < QK_K/32; ++ib) {
  3336. int l = nearest_int(0.5f*(id*scales[ib]-1));
  3337. l = MAX(0, MIN(15, l));
  3338. scales_and_signs[ib] |= ((uint32_t)l << 28);
  3339. }
  3340. memcpy(qs, q3, quant_size);
  3341. dh += block_size/sizeof(ggml_fp16_t);
  3342. qs += block_size;
  3343. }
  3344. }
  3345. size_t quantize_iq3_xxs(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  3346. GGML_ASSERT(n_per_row%QK_K == 0);
  3347. int64_t nblock = n_per_row/QK_K;
  3348. char * qrow = (char *)dst;
  3349. for (int64_t row = 0; row < nrow; ++row) {
  3350. quantize_row_iq3_xxs_impl(256, src, qrow, n_per_row, quant_weights);
  3351. src += n_per_row;
  3352. qrow += nblock*sizeof(block_iq3_xxs);
  3353. }
  3354. return nrow * nblock * sizeof(block_iq3_xxs);
  3355. }
  3356. void quantize_row_iq3_xxs_ref(const float * restrict x, block_iq3_xxs * restrict y, int64_t k) {
  3357. assert(k % QK_K == 0);
  3358. quantize_row_iq3_xxs_impl(256, x, y, k, NULL);
  3359. }
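// IQ3_S quantization works like the IQ3_XXS path above, but always on the
// 512-point grid, stores full 8-bit sign bytes per group of 8 (no parity
// trick), and keeps its scratch buffers caller-supplied so they can be sized
// from IQ3S_BLOCK_SIZE (see quantize_iq3_s below).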
  3360. static void quantize_row_iq3_s_impl(int block_size, const float * restrict x, void * restrict vy, int n,
  3361. const float * restrict quant_weights,
  3362. float * scales,
  3363. float * weight,
  3364. float * xval,
  3365. int8_t * L,
  3366. int8_t * Laux,
  3367. float * waux,
  3368. bool * is_on_grid,
  3369. bool * is_on_grid_aux,
  3370. uint8_t * block_signs) {
  3371. const int gindex = iq3_data_index(512);
  3372. const uint32_t * kgrid_q3xs = iq3_data[gindex].grid;
  3373. const int * kmap_q3xs = iq3_data[gindex].map;
  3374. const uint16_t * kneighbors_q3xs = iq3_data[gindex].neighbours;
  3375. //GGML_ASSERT(quant_weights && "missing quantization weights");
  3376. GGML_ASSERT(kgrid_q3xs && "forgot to call ggml_quantize_init()?");
  3377. GGML_ASSERT(kmap_q3xs && "forgot to call ggml_quantize_init()?");
  3378. GGML_ASSERT(kneighbors_q3xs && "forgot to call ggml_quantize_init()?");
  3379. GGML_ASSERT(n%QK_K == 0);
  3380. const int kMaxQ = 8;
  3381. const int64_t nbl = n/QK_K;
  3382. block_iq3_s * y = vy;
  3383. const int bs4 = block_size/4;
  3384. const int bs8 = block_size/8;
  3385. for (int ibl = 0; ibl < nbl; ++ibl) {
  3386. memset(&y[ibl], 0, sizeof(block_iq3_s));
  3387. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  3388. uint8_t * qs = y[ibl].qs;
  3389. uint8_t * qh = y[ibl].qh;
  3390. uint8_t * signs = y[ibl].signs;
  3391. float max_scale = 0;
  3392. const float * xbl = x + QK_K*ibl;
  3393. float sumx2 = 0;
  3394. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  3395. float sigma2 = 2*sumx2/QK_K;
  3396. for (int ib = 0; ib < QK_K/block_size; ++ib) {
  3397. const float * xb = xbl + block_size*ib;
  3398. if (quant_weights) {
  3399. const float * qw = quant_weights + QK_K*ibl + block_size*ib;
  3400. for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  3401. } else {
  3402. for (int i = 0; i < block_size; ++i) weight[i] = xb[i]*xb[i];
  3403. }
  3404. for (int i = 0; i < block_size; ++i) waux[i] = sqrtf(weight[i]);
  3405. for (int k = 0; k < bs8; ++k) {
  3406. uint8_t s = 0;
  3407. for (int i = 0; i < 8; ++i) {
  3408. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  3409. else {
  3410. xval[8*k + i] = -xb[8*k + i]; s |= (1 << i);
  3411. }
  3412. }
  3413. block_signs[k] = s;
  3414. }
  3415. float max = xval[0];
  3416. for (int i = 1; i < block_size; ++i) max = MAX(max, xval[i]);
  3417. if (!max) {
  3418. scales[ib] = 0;
  3419. continue;
  3420. }
  3421. float best = 0;
  3422. float scale = max/(2*kMaxQ-1);
  3423. for (int k = 0; k < bs4; ++k) is_on_grid[k] = false;
  3424. for (int is = -9; is <= 9; ++is) {
  3425. float id = (2*kMaxQ-1+is*0.2f)/max;
  3426. float this_scale = 1/id;
  3427. for (int k = 0; k < bs4; ++k) {
  3428. for (int i = 0; i < 4; ++i) {
  3429. int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
  3430. Laux[4*k+i] = MAX(0, MIN(kMaxQ-1, l));
  3431. }
  3432. uint16_t u = 0;
  3433. for (int i = 0; i < 4; ++i) u |= (Laux[4*k+i] << 3*i);
  3434. int grid_index = kmap_q3xs[u];
  3435. is_on_grid_aux[k] = true;
  3436. if (grid_index < 0) {
  3437. is_on_grid_aux[k] = false;
  3438. const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
  3439. grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, this_scale, Laux + 4*k);
  3440. }
  3441. }
  3442. float sumqx = 0, sumq2 = 0;
  3443. for (int i = 0; i < block_size; ++i) {
  3444. float w = weight[i];
  3445. float q = 2*Laux[i] + 1;
  3446. sumqx += w*xval[i]*q;
  3447. sumq2 += w*q*q;
  3448. }
  3449. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  3450. scale = sumqx/sumq2; best = scale*sumqx;
  3451. for (int i = 0; i < block_size; ++i) L[i] = Laux[i];
  3452. for (int k = 0; k < bs4; ++k) is_on_grid[k] = is_on_grid_aux[k];
  3453. }
  3454. }
  3455. int n_not_ongrid = 0;
  3456. for (int k = 0; k < bs4; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
  3457. if (n_not_ongrid > 0 && scale > 0) {
  3458. float id = 1/scale;
  3459. for (int k = 0; k < bs4; ++k) {
  3460. //if (is_on_grid[k]) continue;
  3461. uint16_t u = 0;
  3462. for (int i = 0; i < 4; ++i) {
  3463. int l = nearest_int(0.5f*(id*xval[4*k+i]-1));
  3464. l = MAX(0, MIN(kMaxQ-1, l));
  3465. u |= (l << 3*i);
  3466. }
  3467. int grid_index = kmap_q3xs[u];
  3468. if (grid_index < 0) {
  3469. const uint16_t * neighbours = kneighbors_q3xs - kmap_q3xs[u] - 1;
  3470. grid_index = iq3_find_best_neighbour(neighbours, kgrid_q3xs, xval + 4*k, waux + 4*k, scale, L + 4*k);
  3471. }
  3472. const int8_t * pg = (const int8_t *)(kgrid_q3xs + grid_index);
  3473. for (int i = 0; i < 4; ++i) L[4*k+i] = (pg[i] - 1)/2;
  3474. }
  3475. float sumqx = 0, sumq2 = 0;
  3476. for (int i = 0; i < block_size; ++i) {
  3477. float w = weight[i];
  3478. float q = 2*L[i] + 1;
  3479. sumqx += w*xval[i]*q;
  3480. sumq2 += w*q*q;
  3481. }
  3482. if (sumq2 > 0) scale = sumqx/sumq2;
  3483. }
  3484. if (scale < 0) {
3485. // This should never happen, but just in case, flip scale so that it is positive (we use uints to encode the scale)
  3486. // and correspondingly flip quant signs.
  3487. scale = -scale;
  3488. for (int k = 0; k < bs8; ++k) block_signs[k] = ~block_signs[k];
  3489. }
  3490. for (int k = 0; k < bs4; ++k) {
  3491. uint16_t u = 0;
  3492. for (int i = 0; i < 4; ++i) u |= (L[4*k+i] << 3*i);
  3493. int grid_index = kmap_q3xs[u];
  3494. if (grid_index < 0) {
  3495. printf("Oops: found point %u not on grid:", u);
  3496. for (int i = 0; i < 4; ++i) printf(" %d", L[4*k+i]);
  3497. printf("\n");
  3498. GGML_ABORT("fatal error");
  3499. }
  3500. qs[k] = grid_index & 255;
  3501. qh[(ib*bs4+k)/8] |= ((grid_index >> 8) << ((ib*bs4+k)%8));
  3502. }
  3503. qs += bs4;
  3504. for (int k = 0; k < bs8; ++k) signs[k] = block_signs[k];
  3505. signs += bs8;
  3506. GGML_ASSERT(scale >= 0);
  3507. scales[ib] = scale;
  3508. max_scale = MAX(max_scale, scale);
  3509. }
  3510. if (!max_scale) {
  3511. continue;
  3512. }
  3513. float d = max_scale/31;
  3514. y[ibl].d = GGML_FP32_TO_FP16(d * 1.033f);
  3515. float id = 1/d;
  3516. for (int ib = 0; ib < QK_K/block_size; ib += 2) {
  3517. int l1 = nearest_int(0.5f*(id*scales[ib+0]-1));
  3518. l1 = MAX(0, MIN(15, l1));
  3519. int l2 = nearest_int(0.5f*(id*scales[ib+1]-1));
  3520. l2 = MAX(0, MIN(15, l2));
  3521. y[ibl].scales[ib/2] = l1 | (l2 << 4);
  3522. }
  3523. }
  3524. }
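// The candidate-scale loop above re-fits the block scale for each trial quantization by
// weighted least squares: with weights w_i, values x_i and odd quant levels q_i = 2*L_i + 1,
// the error sum_i w_i*(x_i - s*q_i)^2 is minimized at s = sum_i w_i*x_i*q_i / sum_i w_i*q_i^2
// (= sumqx/sumq2), with score s*sumqx = sumqx^2/sumq2; the comparison
// sumqx*sumqx > best*sumq2 maximizes that score without a division. A minimal sketch of the
// fit, with a hypothetical helper name (illustrative only, not used by the code above):
//
//     static float iq_fit_scale(int n, const float * w, const float * x, const int8_t * L) {
//         float sumqx = 0, sumq2 = 0;
//         for (int i = 0; i < n; ++i) {
//             float q = 2*L[i] + 1;   // odd quant level, as in the loops above
//             sumqx += w[i]*x[i]*q;
//             sumq2 += w[i]*q*q;
//         }
//         return sumq2 > 0 ? sumqx/sumq2 : 0.f;
//     }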
  3525. #define IQ3S_BLOCK_SIZE 32
  3526. size_t quantize_iq3_s(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  3527. GGML_ASSERT(n_per_row%QK_K == 0);
  3528. int64_t nblock = n_per_row/QK_K;
  3529. float scales[QK_K/IQ3S_BLOCK_SIZE];
  3530. float weight[IQ3S_BLOCK_SIZE];
  3531. float xval[IQ3S_BLOCK_SIZE];
  3532. int8_t L[IQ3S_BLOCK_SIZE];
  3533. int8_t Laux[IQ3S_BLOCK_SIZE];
  3534. float waux[IQ3S_BLOCK_SIZE];
  3535. bool is_on_grid[IQ3S_BLOCK_SIZE/4];
  3536. bool is_on_grid_aux[IQ3S_BLOCK_SIZE/4];
  3537. uint8_t block_signs[IQ3S_BLOCK_SIZE/8];
  3538. char * qrow = (char *)dst;
  3539. for (int64_t row = 0; row < nrow; ++row) {
  3540. quantize_row_iq3_s_impl(IQ3S_BLOCK_SIZE, src, qrow, n_per_row, quant_weights,
  3541. scales, weight, xval, L, Laux, waux, is_on_grid, is_on_grid_aux, block_signs);
  3542. src += n_per_row;
  3543. qrow += nblock*sizeof(block_iq3_s);
  3544. }
  3545. return nrow * nblock * sizeof(block_iq3_s);
  3546. }
  3547. void quantize_row_iq3_s_ref(const float * restrict x, block_iq3_s * restrict y, int64_t k) {
  3548. assert(k % QK_K == 0);
  3549. quantize_iq3_s(x, y, 1, k, NULL);
  3550. }
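// A row of n_per_row values becomes n_per_row/QK_K packed blocks, so quantize_iq3_s()
// expects dst to hold nrow*(n_per_row/QK_K)*sizeof(block_iq3_s) bytes and returns that size.
// Minimal usage sketch (assumes ggml_quantize_init(GGML_TYPE_IQ3_S) has been called so the
// grid tables exist; buffer names are illustrative):
//
//     const int64_t nrow = 2, n_per_row = 2*QK_K;
//     float src[2*2*QK_K];                          // filled by the caller
//     block_iq3_s dst[2*2];                         // nrow * n_per_row/QK_K blocks
//     size_t written = quantize_iq3_s(src, dst, nrow, n_per_row, /*quant_weights=*/NULL);
//     // written == sizeof(dst)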
  3551. // =================================== 1.5 bpw ===================================================
  3552. static int iq1_find_best_neighbour(const uint16_t * restrict neighbours, const uint64_t * restrict grid,
  3553. const float * restrict xval, const float * restrict weight, float * scale, int8_t * restrict L, int ngrid) {
  3554. int num_neighbors = neighbours[0];
  3555. GGML_ASSERT(num_neighbors > 0);
  3556. float best_score = -FLT_MAX;
  3557. int grid_index = -1;
  3558. for (int j = 1; j <= num_neighbors; ++j) {
  3559. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  3560. float sumqx = 0, sumq2 = 0;
  3561. for (int i = 0; i < 8; ++i) {
  3562. float q = (pg[i] - 3)/2;
  3563. float w = weight[i];
  3564. sumqx += w*q*xval[i];
  3565. sumq2 += w*q*q;
  3566. }
  3567. if (sumqx > 0 && sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
  3568. *scale = sumqx/sumq2; best_score = *scale * sumqx;
  3569. grid_index = neighbours[j];
  3570. }
  3571. }
  3572. if (grid_index < 0) {
  3573. for (int i = 0; i < ngrid; ++i) {
  3574. const int8_t * grid_i = (const int8_t *)(grid + i);
  3575. float sumqx = 0, sumq2 = 0;
  3576. for (int j = 0; j < 8; ++j) {
  3577. float w = weight[j];
  3578. float q = (grid_i[j] - 3)/2;
  3579. sumqx += w*q*xval[j];
  3580. sumq2 += w*q*q;
  3581. }
  3582. if (sumqx > 0 && sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
  3583. *scale = sumqx/sumq2; best_score = *scale*sumqx;
  3584. grid_index = i;
  3585. }
  3586. }
  3587. }
  3588. if (grid_index < 0) {
  3589. printf("Oops, did not find grid point\n");
  3590. printf("Have %d neighbours\n", num_neighbors);
  3591. for (int j = 1; j <= num_neighbors; ++j) {
  3592. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  3593. float sumqx = 0, sumq2 = 0;
  3594. for (int i = 0; i < 8; ++i) {
  3595. float q = (pg[i] - 3)/2;
  3596. float w = weight[i];
  3597. sumqx += w*q*xval[i];
  3598. sumq2 += w*q*q;
  3599. }
  3600. printf(" neighbour %d: sumqx = %g sumq2 = %g\n", j, (double)sumqx, (double)sumq2);
  3601. }
  3602. }
  3603. GGML_ASSERT(grid_index >= 0);
  3604. //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  3605. *scale *= 1.05f; // This is a fudge factor. Don't ask me why it improves the result.
  3606. //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  3607. const int8_t * pg = (const int8_t *)(grid + grid_index);
  3608. for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2;
  3609. return grid_index;
  3610. }
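// Grid/neighbour lookup convention (shared with the iq2/iq3 code paths): for a packed index
// u that lies on the grid, kmap[u] >= 0 and is the grid index itself; for an off-grid u,
// kmap[u] is negative and -kmap[u]-1 is an offset into the flat neighbours table, whose
// first entry is the neighbour count followed by that many candidate grid indices. A minimal
// decoding sketch using the kmap_q2xs/kneighbors_q2xs names from the callers below
// (illustrative only):
//
//     int m = kmap_q2xs[u];
//     if (m >= 0) {
//         int grid_index = m;                                      // u is already on the grid
//     } else {
//         const uint16_t * neighbours = kneighbors_q2xs - m - 1;   // == kneighbors_q2xs + (-m - 1)
//         int num_neighbors = neighbours[0];
//         // neighbours[1 .. num_neighbors] are the grid indices to score
//     }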
  3611. static int iq1_find_best_neighbour2(const uint16_t * restrict neighbours, const uint64_t * restrict grid,
  3612. const float * restrict xval, const float * restrict weight, float scale, const float * restrict xg, int8_t * restrict L, int ngrid) {
  3613. int num_neighbors = neighbours[0];
  3614. GGML_ASSERT(num_neighbors > 0);
  3615. float best_score = FLT_MAX;
  3616. int grid_index = -1;
  3617. for (int j = 1; j <= num_neighbors; ++j) {
  3618. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  3619. float d2 = 0;
  3620. for (int i = 0; i < 8; ++i) {
  3621. float q = xg[(pg[i] - 1)/2];
  3622. float w = weight[i];
  3623. float diff = scale*q - xval[i];
  3624. d2 += w*diff*diff;
  3625. }
  3626. if (d2 < best_score) {
  3627. best_score = d2;
  3628. grid_index = neighbours[j];
  3629. }
  3630. }
  3631. if (grid_index < 0) {
  3632. for (int i = 0; i < ngrid; ++i) {
  3633. const int8_t * grid_i = (const int8_t *)(grid + i);
  3634. float d2 = 0;
  3635. for (int j = 0; j < 8; ++j) {
  3636. float w = weight[j];
  3637. float q = xg[(grid_i[j] - 1)/2];
3638. float diff = scale*q - xval[j];
  3639. d2 += w*diff*diff;
  3640. }
  3641. if (d2 < best_score) {
  3642. best_score = d2;
  3643. grid_index = i;
  3644. }
  3645. }
  3646. }
  3647. if (grid_index < 0) {
  3648. printf("Oops, did not find grid point\n");
  3649. printf("Have %d neighbours\n", num_neighbors);
  3650. for (int j = 1; j <= num_neighbors; ++j) {
  3651. const int8_t * pg = (const int8_t *)(grid + neighbours[j]);
  3652. float sumqx = 0, sumq2 = 0;
  3653. for (int i = 0; i < 8; ++i) {
  3654. float q = xg[(pg[i] - 1)/2];
  3655. float w = weight[i];
  3656. sumqx += w*q*xval[i];
  3657. sumq2 += w*q*q;
  3658. }
  3659. printf(" neighbour %d: sumqx = %g sumq2 = %g\n", j, (double)sumqx, (double)sumq2);
  3660. }
  3661. }
  3662. GGML_ASSERT(grid_index >= 0);
  3663. const int8_t * pg = (const int8_t *)(grid + grid_index);
  3664. for (int i = 0; i < 8; ++i) L[i] = (pg[i] - 1)/2;
  3665. return grid_index;
  3666. }
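// Note the different selection criteria: iq1_find_best_neighbour() above re-fits the scale
// for every neighbour and maximizes sumqx^2/sumq2, while iq1_find_best_neighbour2() keeps
// the scale fixed and minimizes the weighted squared error
//     d2 = sum_i w_i * (scale*q_i - x_i)^2,
// with q_i taken from the shifted value table xg (x_p or x_m in the callers).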
  3667. static int iq1_sort_helper(const void * left, const void * right) {
  3668. const float * l = left;
  3669. const float * r = right;
  3670. return *l < *r ? -1 : *l > *r ? 1 : 0;
  3671. }
  3672. #define IQ1S_BLOCK_SIZE 32
  3673. #define IQ1M_BLOCK_SIZE 16
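// iq1_sort_helper() above compares only the first float of each 2-float record. The callers
// sort the block values together with their original positions by interleaving them:
// pairs[2*j] holds the value and the odd slot, accessed through idx = (int *)(pairs + 1),
// holds the original index, so qsort() with an element size of 2*sizeof(float) moves each
// value/index pair as one unit (this relies on sizeof(int) == sizeof(float)). Minimal sketch
// of the pattern used below (illustrative only):
//
//     float pairs[2*IQ1S_BLOCK_SIZE];
//     int * idx = (int *)(pairs + 1);
//     for (int j = 0; j < IQ1S_BLOCK_SIZE; ++j) {
//         pairs[2*j] = xb[j];     // sort key
//         idx[2*j]   = j;         // original position, carried along
//     }
//     qsort(pairs, IQ1S_BLOCK_SIZE, 2*sizeof(float), iq1_sort_helper);
//     // afterwards idx[2*j] is the original index of the j-th smallest value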
  3674. static void quantize_row_iq1_s_impl(const float * restrict x, void * restrict vy, int64_t n, const float * restrict quant_weights,
  3675. float * scales,
  3676. float * weight,
  3677. float * sumx,
  3678. float * sumw,
  3679. float * pairs,
  3680. int8_t * L,
  3681. uint16_t * index,
  3682. int8_t * shifts) {
  3683. const int gindex = iq2_data_index(GGML_TYPE_IQ1_S);
  3684. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  3685. const int * kmap_q2xs = iq2_data[gindex].map;
  3686. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  3687. GGML_ASSERT(quant_weights && "missing quantization weights");
  3688. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  3689. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  3690. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  3691. GGML_ASSERT(n%QK_K == 0);
  3692. block_iq1_s * y = vy;
  3693. const int64_t nbl = n/QK_K;
  3694. const int block_size = IQ1S_BLOCK_SIZE;
  3695. const float x_p[3] = {-1 + IQ1S_DELTA, IQ1S_DELTA, 1 + IQ1S_DELTA};
  3696. const float x_m[3] = {-1 - IQ1S_DELTA, -IQ1S_DELTA, 1 - IQ1S_DELTA};
  3697. int * idx = (int *)(pairs + 1);
  3698. for (int ibl = 0; ibl < nbl; ++ibl) {
  3699. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  3700. memset(y[ibl].qs, 0, QK_K/8);
  3701. memset(y[ibl].qh, 0, QK_K/16);
  3702. float max_scale = 0;
  3703. const float * xbl = x + QK_K*ibl;
  3704. float sumx2 = 0;
  3705. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  3706. float sigma2 = 2*sumx2/QK_K;
  3707. for (int ib = 0; ib < QK_K/block_size; ++ib) {
  3708. const float * xb = xbl + block_size*ib;
  3709. const float * qw = quant_weights + QK_K*ibl + block_size*ib;
  3710. for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  3711. float max = fabsf(xb[0]);
  3712. for (int i = 1; i < block_size; ++i) max = MAX(max, fabsf(xb[i]));
  3713. if (max < GROUP_MAX_EPS_IQ1_S) {
  3714. scales[ib] = 0;
  3715. memset(L, 1, block_size);
  3716. continue;
  3717. }
3718. // Here we solve exactly the weighted sum-of-squared-differences (SSD) minimization problem.
3719. // With just 3 allowed quant values (-1, 0, 1), we can search exhaustively for the two
3720. // boundaries that split the values xb[i] into 3 groups. To do so, we sort the values
3721. // in ascending order, compute Si = sum[weight[j] xb[j], j = 0...i] and
3722. // Wi = sum[weight[j], j = 0...i], and use these to quickly get the optimum scale
3723. // and score for each possible split.
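// Concretely, with the values sorted ascending and prefix sums (sumx[] / sumw[] below)
//     S_i = sum_{j<i} weight[j]*xb[j],   W_i = sum_{j<i} weight[j],
// a split (i1, i2) assigns quant value q0 to the first i1 sorted entries, q1 to the next
// i2-i1 and q2 to the rest (q0, q1, q2 coming from x_p[] or x_m[]), giving
//     sumqx = (S_i1 - S_0)*q0 + (S_i2 - S_i1)*q1 + (S_n - S_i2)*q2
//     sumq2 = (W_i1 - W_0)*q0*q0 + (W_i2 - W_i1)*q1*q1 + (W_n - W_i2)*q2*q2
// so the optimal scale sumqx/sumq2 and score sumqx^2/sumq2 of every split cost O(1)
// instead of O(block_size) to evaluate.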
  3724. for (int j = 0; j < block_size; ++j) {
  3725. pairs[2*j] = xb[j];
  3726. idx[2*j] = j;
  3727. }
  3728. qsort(pairs, block_size, 2*sizeof(float), iq1_sort_helper);
  3729. {
  3730. sumx[0] = sumw[0] = 0;
  3731. for (int j = 0; j < block_size; ++j) {
  3732. int i = idx[2*j];
  3733. sumx[j+1] = sumx[j] + weight[i]*xb[i];
  3734. sumw[j+1] = sumw[j] + weight[i];
  3735. }
  3736. }
  3737. float best_score = -FLT_MIN, scale = max;
  3738. int besti1 = -1, besti2 = -1, best_shift = 0;
  3739. for (int i1 = 0; i1 <= block_size; ++i1) {
  3740. for (int i2 = i1; i2 <= block_size; ++i2) {
  3741. float sumqx = (sumx[i1] - sumx[0])*x_p[0] + (sumx[i2] - sumx[i1])*x_p[1] + (sumx[block_size] - sumx[i2])*x_p[2];
  3742. float sumq2 = (sumw[i1] - sumw[0])*x_p[0]*x_p[0] + (sumw[i2] - sumw[i1])*x_p[1]*x_p[1] + (sumw[block_size] - sumw[i2])*x_p[2]*x_p[2];
  3743. if (sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
  3744. scale = sumqx/sumq2; best_score = scale*sumqx;
  3745. besti1 = i1; besti2 = i2; best_shift = 1;
  3746. }
  3747. sumqx = (sumx[i1] - sumx[0])*x_m[0] + (sumx[i2] - sumx[i1])*x_m[1] + (sumx[block_size] - sumx[i2])*x_m[2];
  3748. sumq2 = (sumw[i1] - sumw[0])*x_m[0]*x_m[0] + (sumw[i2] - sumw[i1])*x_m[1]*x_m[1] + (sumw[block_size] - sumw[i2])*x_m[2]*x_m[2];
  3749. if (sumq2 > 0 && sumqx*sumqx > best_score*sumq2) {
  3750. scale = sumqx/sumq2; best_score = scale*sumqx;
  3751. besti1 = i1; besti2 = i2; best_shift = -1;
  3752. }
  3753. }
  3754. }
  3755. GGML_ASSERT(besti1 >= 0 && besti2 >= 0 && best_shift != 0);
  3756. for (int j = 0; j < besti1; ++j) L[idx[2*j]] = 0;
  3757. for (int j = besti1; j < besti2; ++j) L[idx[2*j]] = 1;
  3758. for (int j = besti2; j < block_size; ++j) L[idx[2*j]] = 2;
  3759. if (scale < 0) {
  3760. for (int j = 0; j < block_size; ++j) L[j] = 2 - L[j];
  3761. scale = -scale; best_shift = -best_shift;
  3762. }
  3763. bool all_on_grid = true;
  3764. const float * xx = best_shift == 1 ? x_p : x_m;
  3765. for (int k = 0; k < block_size/8; ++k) {
  3766. uint16_t u = 0;
  3767. for (int j = 0; j < 8; ++j) u |= (L[8*k+j] << 2*j);
  3768. int grid_index = kmap_q2xs[u];
  3769. if (grid_index < 0) {
  3770. all_on_grid = false;
  3771. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  3772. grid_index = iq1_find_best_neighbour2(neighbours, kgrid_q2xs, xb + 8*k, weight + 8*k, scale, xx, L + 8*k, NGRID_IQ1S);
  3773. GGML_ASSERT(grid_index >= 0);
  3774. }
  3775. index[k] = grid_index;
  3776. }
  3777. if (!all_on_grid) {
  3778. float sumqx = 0, sumq2 = 0;
  3779. for (int k = 0; k < block_size/8; ++k) {
  3780. const int8_t * pg = (const int8_t *)(kgrid_q2xs + index[k]);
  3781. for (int j = 0; j < 8; ++j) {
  3782. float w = weight[8*k + j];
  3783. float q = xx[(pg[j] - 1)/2];
  3784. sumqx += w*q*xb[8*k+j];
  3785. sumq2 += w*q*q;
  3786. }
  3787. }
  3788. if (sumqx > 0 && sumq2 > 0) scale = sumqx/sumq2;
  3789. }
  3790. uint16_t h = 0;
  3791. for (int k = 0; k < block_size/8; ++k) {
  3792. y[ibl].qs[(block_size/8)*ib + k] = index[k] & 255;
  3793. h |= (index[k] >> 8) << 3*k;
  3794. }
  3795. y[ibl].qh[ib] = h;
  3796. GGML_ASSERT(scale >= 0);
  3797. scales[ib] = scale;
  3798. shifts[ib] = best_shift;
  3799. max_scale = MAX(max_scale, scale);
  3800. }
  3801. if (!max_scale) {
  3802. continue;
  3803. }
  3804. float d = max_scale/15;
  3805. y[ibl].d = GGML_FP32_TO_FP16(d*1.125f); // 1.125f is another fudge factor. Don't ask me why it is needed.
  3806. float id = 1/d;
  3807. for (int ib = 0; ib < QK_K/block_size; ++ib) {
  3808. int l = nearest_int(0.5f*(id*scales[ib]-1));
  3809. l = MAX(0, MIN(7, l));
  3810. if (shifts[ib] == -1) l |= 8;
  3811. y[ibl].qh[ib] |= (l << 12);
  3812. }
  3813. }
  3814. }
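// Packing recap for IQ1_S, per 32-value sub-block ib: the four grid indices (up to 11 bits
// each) keep their low 8 bits in qs[4*ib + k] and their high 3 bits in bits 3*k..3*k+2 of
// qh[ib]; bits 12..14 of qh[ib] hold the 3-bit quantized block scale and bit 15 the delta
// shift (set when shifts[ib] == -1, i.e. the x_m table was used). Derived from the code
// above, a decoder would read a sub-block back roughly as:
//
//     int grid_index = qs[4*ib + k] | (((qh[ib] >> (3*k)) & 7) << 8);
//     float dl    = d * (2*((qh[ib] >> 12) & 7) + 1);
//     float delta = (qh[ib] & 0x8000) ? -IQ1S_DELTA : IQ1S_DELTA;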
  3815. size_t quantize_iq1_s(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  3816. GGML_ASSERT(n_per_row%QK_K == 0);
  3817. float scales[QK_K/IQ1S_BLOCK_SIZE];
  3818. float weight[IQ1S_BLOCK_SIZE];
  3819. int8_t L[IQ1S_BLOCK_SIZE];
  3820. float sumx[IQ1S_BLOCK_SIZE+1];
  3821. float sumw[IQ1S_BLOCK_SIZE+1];
  3822. float pairs[2*IQ1S_BLOCK_SIZE];
  3823. uint16_t index[IQ1S_BLOCK_SIZE/8];
  3824. int8_t shifts[QK_K/IQ1S_BLOCK_SIZE];
  3825. int64_t nblock = n_per_row/QK_K;
  3826. char * qrow = (char *)dst;
  3827. for (int64_t row = 0; row < nrow; ++row) {
  3828. quantize_row_iq1_s_impl(src, qrow, n_per_row, quant_weights, scales, weight, sumx, sumw, pairs, L, index, shifts);
  3829. src += n_per_row;
  3830. qrow += nblock*sizeof(block_iq1_s);
  3831. }
  3832. return nrow * nblock * sizeof(block_iq1_s);
  3833. }
  3834. static void quantize_row_iq1_m_impl(const float * restrict x, void * restrict vy, int64_t n, const float * restrict quant_weights,
  3835. float * scales,
  3836. float * weight,
  3837. float * pairs,
  3838. int8_t * L,
  3839. uint16_t * index,
  3840. int8_t * shifts) {
  3841. const int gindex = iq2_data_index(GGML_TYPE_IQ1_M);
  3842. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  3843. const int * kmap_q2xs = iq2_data[gindex].map;
  3844. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  3845. //GGML_ASSERT(quant_weights && "missing quantization weights");
  3846. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  3847. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  3848. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  3849. GGML_ASSERT(n%QK_K == 0);
  3850. block_iq1_m * y = vy;
  3851. const int64_t nbl = n/QK_K;
  3852. const int block_size = IQ1M_BLOCK_SIZE;
  3853. const float x_p[3] = {-1 + IQ1M_DELTA, IQ1M_DELTA, 1 + IQ1M_DELTA};
  3854. const float x_m[3] = {-1 - IQ1M_DELTA, -IQ1M_DELTA, 1 - IQ1M_DELTA};
  3855. const uint8_t masks[4] = {0x00, 0x80, 0x08, 0x88};
  3856. int * idx = (int *)(pairs + 1);
  3857. float sumqx[4], sumq2[4];
  3858. iq1m_scale_t s;
  3859. const float * xx;
  3860. for (int ibl = 0; ibl < nbl; ++ibl) {
  3861. memset(y[ibl].qs, 0, QK_K/8);
  3862. memset(y[ibl].qh, 0, QK_K/16);
  3863. memset(y[ibl].scales, 0, QK_K/32);
  3864. float max_scale = 0;
  3865. const float * xbl = x + QK_K*ibl;
  3866. float sumx2 = 0;
  3867. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  3868. float sigma2 = 2*sumx2/QK_K;
  3869. for (int ib = 0; ib < QK_K/block_size; ++ib) {
  3870. const float * xb = xbl + block_size*ib;
  3871. if (quant_weights) {
  3872. const float * qw = quant_weights + QK_K*ibl + block_size*ib;
  3873. for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  3874. } else {
  3875. for (int i = 0; i < block_size; ++i) weight[i] = xb[i]*xb[i];
  3876. }
  3877. float max = fabsf(xb[0]);
  3878. for (int i = 1; i < block_size; ++i) max = MAX(max, fabsf(xb[i]));
  3879. if (max < GROUP_MAX_EPS_IQ1_M) {
  3880. scales[ib] = 0;
  3881. memset(L, 1, block_size);
  3882. continue;
  3883. }
3884. // Here we solve exactly the weighted sum-of-squared-differences (SSD) minimization problem.
3885. // With just 3 allowed quant values (-1, 0, 1), we can search exhaustively for the two
3886. // boundaries that split the values xb[i] into 3 groups. To do so, we sort the values
3887. // in ascending order, compute Si = sum[weight[j] xb[j], j = 0...i] and
3888. // Wi = sum[weight[j], j = 0...i], and use these to quickly get the optimum scale
3889. // and score for each possible split.
  3890. for (int j = 0; j < block_size; ++j) {
  3891. pairs[2*j] = xb[j];
  3892. idx[2*j] = j;
  3893. }
  3894. qsort(pairs, block_size, 2*sizeof(float), iq1_sort_helper);
  3895. float best_score = -FLT_MIN, scale = max;
  3896. int besti1 = -1, besti2 = -1, best_k = -1;
  3897. // 0: +, +
  3898. // 1: +, -
  3899. // 2: -, +
  3900. // 3: -, -
  3901. for (int i1 = 0; i1 <= block_size; ++i1) {
  3902. for (int i2 = i1; i2 <= block_size; ++i2) {
  3903. memset(sumqx, 0, 4*sizeof(float));
  3904. memset(sumq2, 0, 4*sizeof(float));
  3905. for (int j = 0; j < i1; ++j) {
  3906. int i = idx[2*j];
  3907. if (i < block_size/2) {
  3908. sumqx[0] += weight[i]*x_p[0]*xb[i];
  3909. sumqx[1] += weight[i]*x_p[0]*xb[i];
  3910. sumqx[2] += weight[i]*x_m[0]*xb[i];
  3911. sumqx[3] += weight[i]*x_m[0]*xb[i];
  3912. sumq2[0] += weight[i]*x_p[0]*x_p[0];
  3913. sumq2[1] += weight[i]*x_p[0]*x_p[0];
  3914. sumq2[2] += weight[i]*x_m[0]*x_m[0];
  3915. sumq2[3] += weight[i]*x_m[0]*x_m[0];
  3916. } else {
  3917. sumqx[0] += weight[i]*x_p[0]*xb[i];
  3918. sumqx[2] += weight[i]*x_p[0]*xb[i];
  3919. sumqx[1] += weight[i]*x_m[0]*xb[i];
  3920. sumqx[3] += weight[i]*x_m[0]*xb[i];
  3921. sumq2[0] += weight[i]*x_p[0]*x_p[0];
  3922. sumq2[2] += weight[i]*x_p[0]*x_p[0];
  3923. sumq2[1] += weight[i]*x_m[0]*x_m[0];
  3924. sumq2[3] += weight[i]*x_m[0]*x_m[0];
  3925. }
  3926. }
  3927. for (int j = i1; j < i2; ++j) {
  3928. int i = idx[2*j];
  3929. if (i < block_size/2) {
  3930. sumqx[0] += weight[i]*x_p[1]*xb[i];
  3931. sumqx[1] += weight[i]*x_p[1]*xb[i];
  3932. sumqx[2] += weight[i]*x_m[1]*xb[i];
  3933. sumqx[3] += weight[i]*x_m[1]*xb[i];
  3934. sumq2[0] += weight[i]*x_p[1]*x_p[1];
  3935. sumq2[1] += weight[i]*x_p[1]*x_p[1];
  3936. sumq2[2] += weight[i]*x_m[1]*x_m[1];
  3937. sumq2[3] += weight[i]*x_m[1]*x_m[1];
  3938. } else {
  3939. sumqx[0] += weight[i]*x_p[1]*xb[i];
  3940. sumqx[2] += weight[i]*x_p[1]*xb[i];
  3941. sumqx[1] += weight[i]*x_m[1]*xb[i];
  3942. sumqx[3] += weight[i]*x_m[1]*xb[i];
  3943. sumq2[0] += weight[i]*x_p[1]*x_p[1];
  3944. sumq2[2] += weight[i]*x_p[1]*x_p[1];
  3945. sumq2[1] += weight[i]*x_m[1]*x_m[1];
  3946. sumq2[3] += weight[i]*x_m[1]*x_m[1];
  3947. }
  3948. }
  3949. for (int j = i2; j < block_size; ++j) {
  3950. int i = idx[2*j];
  3951. if (i < block_size/2) {
  3952. sumqx[0] += weight[i]*x_p[2]*xb[i];
  3953. sumqx[1] += weight[i]*x_p[2]*xb[i];
  3954. sumqx[2] += weight[i]*x_m[2]*xb[i];
  3955. sumqx[3] += weight[i]*x_m[2]*xb[i];
  3956. sumq2[0] += weight[i]*x_p[2]*x_p[2];
  3957. sumq2[1] += weight[i]*x_p[2]*x_p[2];
  3958. sumq2[2] += weight[i]*x_m[2]*x_m[2];
  3959. sumq2[3] += weight[i]*x_m[2]*x_m[2];
  3960. } else {
  3961. sumqx[0] += weight[i]*x_p[2]*xb[i];
  3962. sumqx[2] += weight[i]*x_p[2]*xb[i];
  3963. sumqx[1] += weight[i]*x_m[2]*xb[i];
  3964. sumqx[3] += weight[i]*x_m[2]*xb[i];
  3965. sumq2[0] += weight[i]*x_p[2]*x_p[2];
  3966. sumq2[2] += weight[i]*x_p[2]*x_p[2];
  3967. sumq2[1] += weight[i]*x_m[2]*x_m[2];
  3968. sumq2[3] += weight[i]*x_m[2]*x_m[2];
  3969. }
  3970. }
  3971. for (int k = 0; k < 4; ++k) {
  3972. if (sumq2[k] > 0 && sumqx[k]*sumqx[k] > best_score*sumq2[k]) {
  3973. scale = sumqx[k]/sumq2[k]; best_score = scale*sumqx[k];
  3974. besti1 = i1; besti2 = i2; best_k = k;
  3975. }
  3976. }
  3977. }
  3978. }
  3979. GGML_ASSERT(besti1 >= 0 && besti2 >= 0 && best_k >= 0);
  3980. for (int j = 0; j < besti1; ++j) L[idx[2*j]] = 0;
  3981. for (int j = besti1; j < besti2; ++j) L[idx[2*j]] = 1;
  3982. for (int j = besti2; j < block_size; ++j) L[idx[2*j]] = 2;
  3983. if (scale < 0) {
  3984. for (int j = 0; j < block_size; ++j) L[j] = 2 - L[j];
  3985. scale = -scale;
  3986. best_k = best_k == 0 ? 3 : best_k == 1 ? 2 : best_k == 2 ? 1 : 0;
  3987. }
  3988. bool all_on_grid = true;
  3989. for (int k = 0; k < block_size/8; ++k) {
  3990. if (k == 0) xx = best_k < 2 ? x_p : x_m;
  3991. else xx = best_k%2 == 0 ? x_p : x_m;
  3992. uint16_t u = 0;
  3993. for (int j = 0; j < 8; ++j) u |= (L[8*k+j] << 2*j);
  3994. int grid_index = kmap_q2xs[u];
  3995. if (grid_index < 0) {
  3996. all_on_grid = false;
  3997. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  3998. grid_index = iq1_find_best_neighbour2(neighbours, kgrid_q2xs, xb + 8*k, weight + 8*k, scale, xx, L + 8*k, NGRID_IQ1S);
  3999. GGML_ASSERT(grid_index >= 0);
  4000. }
  4001. index[k] = grid_index;
  4002. }
  4003. if (!all_on_grid) {
  4004. float sumqx_f = 0, sumq2_f = 0;
  4005. for (int k = 0; k < block_size/8; ++k) {
  4006. if (k == 0) xx = best_k < 2 ? x_p : x_m;
  4007. else xx = best_k%2 == 0 ? x_p : x_m;
  4008. const int8_t * pg = (const int8_t *)(kgrid_q2xs + index[k]);
  4009. for (int j = 0; j < 8; ++j) {
  4010. float w = weight[8*k + j];
  4011. float q = xx[(pg[j] - 1)/2];
  4012. sumqx_f += w*q*xb[8*k+j];
  4013. sumq2_f += w*q*q;
  4014. }
  4015. }
  4016. if (sumqx_f > 0 && sumq2_f > 0) scale = sumqx_f/sumq2_f;
  4017. }
  4018. y[ibl].qs[2*ib + 0] = index[0] & 255;
  4019. y[ibl].qs[2*ib + 1] = index[1] & 255;
  4020. y[ibl].qh[ib] = (index[0] >> 8) | ((index[1] >> 8) << 4);
  4021. GGML_ASSERT(scale >= 0);
  4022. scales[ib] = scale;
  4023. shifts[ib] = best_k;
  4024. max_scale = MAX(max_scale, scale);
  4025. }
  4026. if (!max_scale) {
  4027. continue;
  4028. }
  4029. uint16_t * sc = (uint16_t *)y[ibl].scales;
  4030. float d = max_scale/15;
  4031. float id = 1/d;
  4032. float sumqx_f = 0, sumq2_f = 0;
  4033. for (int ib = 0; ib < QK_K/block_size; ++ib) {
  4034. int l = nearest_int(0.5f*(id*scales[ib+0]-1));
  4035. l = MAX(0, MIN(7, l));
  4036. sc[ib/4] |= (l << 3*(ib%4));
  4037. y[ibl].qh[ib] |= masks[shifts[ib]];
  4038. const float * xb = xbl + block_size*ib;
  4039. if (quant_weights) {
  4040. const float * qw = quant_weights + QK_K*ibl + block_size*ib;
  4041. for (int i = 0; i < block_size; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  4042. } else {
  4043. for (int i = 0; i < block_size; ++i) weight[i] = xb[i]*xb[i];
  4044. }
  4045. for (int k = 0; k < block_size/8; ++k) {
  4046. if (k == 0) xx = shifts[ib] < 2 ? x_p : x_m;
  4047. else xx = shifts[ib]%2 == 0 ? x_p : x_m;
  4048. const int8_t * pg = (const int8_t *)(kgrid_q2xs + y[ibl].qs[2*ib+k] + ((y[ibl].qh[ib] << (8 - 4*k)) & 0x700));
  4049. for (int j = 0; j < 8; ++j) {
  4050. float w = weight[8*k + j];
  4051. float q = xx[(pg[j] - 1)/2]*(2*l+1);
  4052. sumqx_f += w*q*xb[8*k+j];
  4053. sumq2_f += w*q*q;
  4054. }
  4055. }
  4056. }
  4057. if (sumq2_f > 0) d = sumqx_f/sumq2_f;
  4058. s.f16 = GGML_FP32_TO_FP16(d*1.1125f); // 1.1125f is another fudge factor. Don't ask me why it is needed.
  4059. sc[0] |= ((s.u16 & 0x000f) << 12);
  4060. sc[1] |= ((s.u16 & 0x00f0) << 8);
  4061. sc[2] |= ((s.u16 & 0x0f00) << 4);
  4062. sc[3] |= ((s.u16 & 0xf000) << 0);
  4063. }
  4064. }
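// IQ1_M has no dedicated fp16 d field: the super-block scale s.f16 computed above is split
// into four nibbles stored in the top 4 bits of the four 16-bit words sc[0..3] overlaid on
// y[ibl].scales. The read side reassembles it as
//     scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
// which is exactly the expression ggml_validate_row_data() uses for GGML_TYPE_IQ1_M below.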
  4065. size_t quantize_iq1_m(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  4066. GGML_ASSERT(n_per_row%QK_K == 0);
  4067. float scales[QK_K/IQ1M_BLOCK_SIZE];
  4068. float weight[IQ1M_BLOCK_SIZE];
  4069. int8_t L[IQ1M_BLOCK_SIZE];
  4070. float pairs[2*IQ1M_BLOCK_SIZE];
  4071. uint16_t index[IQ1M_BLOCK_SIZE/8];
  4072. int8_t shifts[QK_K/IQ1M_BLOCK_SIZE];
  4073. int64_t nblock = n_per_row/QK_K;
  4074. char * qrow = (char *)dst;
  4075. for (int64_t row = 0; row < nrow; ++row) {
  4076. quantize_row_iq1_m_impl(src, qrow, n_per_row, quant_weights, scales, weight, pairs, L, index, shifts);
  4077. src += n_per_row;
  4078. qrow += nblock*sizeof(block_iq1_m);
  4079. }
  4080. return nrow * nblock * sizeof(block_iq1_m);
  4081. }
  4082. // ============================ 4-bit non-linear quants
  4083. static inline int best_index_int8(int n, const int8_t * val, float x) {
  4084. if (x <= val[0]) return 0;
  4085. if (x >= val[n-1]) return n-1;
  4086. int ml = 0, mu = n-1;
  4087. while (mu-ml > 1) {
  4088. int mav = (ml+mu)/2;
  4089. if (x < val[mav]) mu = mav; else ml = mav;
  4090. }
  4091. return x - val[mu-1] < val[mu] - x ? mu-1 : mu;
  4092. }
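// best_index_int8() assumes val[] is sorted ascending (as kvalues_iq4nl is) and bisects:
// after the loop, val[ml] <= x < val[mu] with mu == ml+1, and the final comparison returns
// whichever of the two endpoints is closer to x. Typical use in the code below (sketch):
//
//     int   l = best_index_int8(16, values, id*xb[j]);   // 4-bit index into the value table
//     float q = values[l];                               // reconstructed non-linear level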
  4093. static void quantize_row_iq4_nl_impl(const int super_block_size, const int block_size, const float * restrict x,
  4094. ggml_fp16_t * dh, uint8_t * q4, uint16_t * scales_h, uint8_t * scales_l,
  4095. float * scales, float * weight, uint8_t * L,
  4096. const int8_t * values,
  4097. const float * quant_weights,
  4098. const int ntry) {
  4099. float sigma2 = 0;
  4100. for (int j = 0; j < super_block_size; ++j) sigma2 += x[j]*x[j];
  4101. sigma2 *= 2.f/super_block_size;
  4102. memset(q4, 0, super_block_size/2);
  4103. dh[0] = GGML_FP32_TO_FP16(0.f);
  4104. float max_scale = 0, amax_scale = 0;
  4105. for (int ib = 0; ib < super_block_size/block_size; ++ib) {
  4106. const float * xb = x + ib*block_size;
  4107. uint8_t * Lb = L + ib*block_size;
  4108. if (quant_weights) {
  4109. const float * qw = quant_weights + ib*block_size;
  4110. for (int j = 0; j < block_size; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
  4111. } else {
  4112. for (int j = 0; j < block_size; ++j) weight[j] = xb[j]*xb[j];
  4113. }
  4114. float amax = 0, max = 0;
  4115. for (int j = 0; j < block_size; ++j) {
  4116. float ax = fabsf(xb[j]);
  4117. if (ax > amax) {
  4118. amax = ax; max = xb[j];
  4119. }
  4120. }
  4121. if (amax < GROUP_MAX_EPS) {
  4122. scales[ib] = 0;
  4123. continue;
  4124. }
  4125. float d = ntry > 0 ? -max/values[0] : max/values[0];
  4126. float id = 1/d;
  4127. float sumqx = 0, sumq2 = 0;
  4128. for (int j = 0; j < block_size; ++j) {
  4129. float al = id*xb[j];
  4130. int l = best_index_int8(16, values, al);
  4131. Lb[j] = l;
  4132. float q = values[l];
  4133. float w = weight[j];
  4134. sumqx += w*q*xb[j];
  4135. sumq2 += w*q*q;
  4136. }
  4137. d = sumqx/sumq2;
  4138. float best = d*sumqx;
  4139. for (int itry = -ntry; itry <= ntry; ++itry) {
  4140. id = (itry + values[0])/max;
  4141. sumqx = sumq2 = 0;
  4142. for (int j = 0; j < block_size; ++j) {
  4143. float al = id*xb[j];
  4144. int l = best_index_int8(16, values, al);
  4145. float q = values[l];
  4146. float w = weight[j];
  4147. sumqx += w*q*xb[j];
  4148. sumq2 += w*q*q;
  4149. }
  4150. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  4151. d = sumqx/sumq2; best = d * sumqx;
  4152. }
  4153. }
  4154. scales[ib] = d;
  4155. float abs_d = fabsf(d);
  4156. if (abs_d > amax_scale) {
  4157. amax_scale = abs_d; max_scale = d;
  4158. }
  4159. }
  4160. if (super_block_size/block_size > 1) {
  4161. int nb = super_block_size/block_size;
  4162. memset(scales_h, 0, ((nb+7)/8)*sizeof(uint16_t));
  4163. float d = -max_scale/32;
  4164. dh[0] = GGML_FP32_TO_FP16(d);
  4165. float id = d ? 1/d : 0.f;
  4166. for (int ib = 0; ib < super_block_size/block_size; ++ib) {
  4167. int l = nearest_int(id*scales[ib]);
  4168. l = MAX(-32, MIN(31, l));
  4169. float dl = d * l;
  4170. float idl = dl ? 1/dl : 0.f;
  4171. uint8_t * Lb = L + ib*block_size;
  4172. const float * xb = x + ib*block_size;
  4173. for (int j = 0; j < block_size; ++j) {
  4174. Lb[j] = best_index_int8(16, values, idl*xb[j]);
  4175. }
  4176. l += 32;
  4177. uint8_t l_l = l & 0xf;
  4178. uint8_t l_h = l >> 4;
  4179. if (ib%2 == 0) scales_l[ib/2] = l_l;
  4180. else scales_l[ib/2] |= (l_l << 4);
  4181. scales_h[ib/8] |= (l_h << 2*(ib%8));
  4182. }
  4183. } else {
  4184. dh[0] = GGML_FP32_TO_FP16(scales[0]);
  4185. if (ntry > 0) {
  4186. float id = scales[0] ? 1/scales[0] : 0;
  4187. for (int j = 0; j < super_block_size; ++j) {
  4188. L[j] = best_index_int8(16, values, id*x[j]);
  4189. }
  4190. }
  4191. }
  4192. for (int i = 0; i < super_block_size/32; ++i) {
  4193. for (int j = 0; j < 16; ++j) {
  4194. q4[16*i + j] = L[32*i + j] | (L[32*i + 16 + j] << 4);
  4195. }
  4196. }
  4197. }
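// The final loop packs two 4-bit indices per byte, 32 values at a time: q4[16*i + j] holds
// L[32*i + j] in its low nibble and L[32*i + 16 + j] in its high nibble. A decoder therefore
// reads the pair back as (sketch):
//
//     int lo = q4[16*i + j] & 0xf;    // element 32*i + j
//     int hi = q4[16*i + j] >> 4;     // element 32*i + 16 + j
//     // dequantized values: d*values[lo] and d*values[hi]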
  4198. size_t quantize_iq4_nl(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  4199. GGML_ASSERT(n_per_row%QK4_NL == 0);
  4200. int64_t nblock = n_per_row/QK4_NL;
  4201. char * qrow = (char *)dst;
  4202. uint8_t L[QK4_NL];
  4203. float weight[QK4_NL];
  4204. uint16_t unused_h;
  4205. uint8_t * unused_l = NULL;
  4206. float scale;
  4207. for (int64_t row = 0; row < nrow; ++row) {
  4208. block_iq4_nl * iq4 = (block_iq4_nl *)qrow;
  4209. for (int ibl = 0; ibl < nblock; ++ibl) {
  4210. const float * qw = quant_weights ? quant_weights + QK4_NL*ibl : NULL;
  4211. quantize_row_iq4_nl_impl(QK4_NL, 32, src + QK4_NL*ibl, &iq4[ibl].d, iq4[ibl].qs, &unused_h, unused_l,
  4212. &scale, weight, L, kvalues_iq4nl, qw, 7);
  4213. }
  4214. src += n_per_row;
  4215. qrow += nblock*sizeof(block_iq4_nl);
  4216. }
  4217. return nrow * nblock * sizeof(block_iq4_nl);
  4218. }
  4219. //void quantize_row_iq4_nl_ref(const float * restrict x, void * restrict vy, int64_t k) {
  4220. void quantize_row_iq4_nl_ref(const float * restrict x, block_iq4_nl * restrict y, int64_t k) {
  4221. GGML_ASSERT(k%QK4_NL == 0);
  4222. int64_t nblock = k/QK4_NL;
  4223. uint8_t L[QK4_NL];
  4224. float weight[QK4_NL];
  4225. uint16_t unused_h;
  4226. uint8_t * unused_l = NULL;
  4227. float scale;
  4228. block_iq4_nl * iq4 = y;
  4229. for (int ibl = 0; ibl < nblock; ++ibl) {
  4230. quantize_row_iq4_nl_impl(QK4_NL, 32, x + QK4_NL*ibl, &iq4[ibl].d, iq4[ibl].qs, &unused_h, unused_l,
  4231. &scale, weight, L, kvalues_iq4nl, NULL, -1);
  4232. }
  4233. }
  4234. size_t quantize_iq4_xs(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  4235. GGML_ASSERT(n_per_row%QK_K == 0);
  4236. int64_t nblock = n_per_row/QK_K;
  4237. char * qrow = (char *)dst;
  4238. uint8_t L[QK_K];
  4239. float weight[32];
  4240. float scales[QK_K/32];
  4241. for (int64_t row = 0; row < nrow; ++row) {
  4242. block_iq4_xs * iq4 = (block_iq4_xs *)qrow;
  4243. for (int ibl = 0; ibl < nblock; ++ibl) {
  4244. const float * qw = quant_weights ? quant_weights + QK_K*ibl : NULL;
  4245. quantize_row_iq4_nl_impl(QK_K, 32, src + QK_K*ibl, &iq4[ibl].d, iq4[ibl].qs, &iq4[ibl].scales_h, iq4[ibl].scales_l,
  4246. scales, weight, L, kvalues_iq4nl, qw, 7);
  4247. }
  4248. src += n_per_row;
  4249. qrow += nblock*sizeof(block_iq4_xs);
  4250. }
  4251. return nrow * nblock * sizeof(block_iq4_xs);
  4252. }
  4253. void quantize_row_iq4_xs_ref(const float * restrict x, block_iq4_xs * restrict y, int64_t k) {
  4254. assert(k % QK_K == 0);
  4255. quantize_iq4_xs(x, y, 1, k, NULL);
  4256. }
  4257. // =============================== 2.5625 bpw
  4258. static void quantize_row_iq2_s_impl(const float * restrict x, void * restrict vy, int64_t n, const float * restrict quant_weights) {
  4259. const int gindex = iq2_data_index(GGML_TYPE_IQ2_S);
  4260. const uint64_t * kgrid_q2xs = iq2_data[gindex].grid;
  4261. const int * kmap_q2xs = iq2_data[gindex].map;
  4262. const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
  4263. GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
  4264. GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
  4265. GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
  4266. GGML_ASSERT(n%QK_K == 0);
  4267. const int kMaxQ = 3;
  4268. const int64_t nbl = n/QK_K;
  4269. block_iq2_s * y = vy;
  4270. float scales[QK_K/16];
  4271. float weight[16];
  4272. float xval[16];
  4273. int8_t L[16];
  4274. int8_t Laux[16];
  4275. float waux[16];
  4276. bool is_on_grid[2];
  4277. bool is_on_grid_aux[2];
  4278. uint8_t block_signs[2];
  4279. for (int ibl = 0; ibl < nbl; ++ibl) {
  4280. memset(&y[ibl], 0, sizeof(block_iq2_s));
  4281. y[ibl].d = GGML_FP32_TO_FP16(0.f);
  4282. float max_scale = 0;
  4283. const float * xbl = x + QK_K*ibl;
  4284. float sumx2 = 0;
  4285. for (int i = 0; i < QK_K; ++i) sumx2 += xbl[i]*xbl[i];
  4286. float sigma2 = 2*sumx2/QK_K;
  4287. for (int ib = 0; ib < QK_K/16; ++ib) {
  4288. const float * xb = xbl + 16*ib;
  4289. if (quant_weights) {
  4290. const float * qw = quant_weights + QK_K*ibl + 16*ib;
  4291. for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
  4292. } else {
  4293. for (int i = 0; i < 16; ++i) weight[i] = 0.25f*sigma2 + xb[i]*xb[i];
  4294. }
  4295. for (int i = 0; i < 16; ++i) waux[i] = sqrtf(weight[i]);
  4296. for (int k = 0; k < 2; ++k) {
  4297. uint8_t s = 0;
  4298. for (int i = 0; i < 8; ++i) {
  4299. if (xb[8*k + i] >= 0) xval[8*k + i] = xb[8*k + i];
  4300. else {
  4301. xval[8*k + i] = -xb[8*k + i]; s |= (1 << i);
  4302. }
  4303. }
  4304. block_signs[k] = s;
  4305. }
  4306. float max = xval[0];
  4307. for (int i = 1; i < 16; ++i) max = MAX(max, xval[i]);
  4308. if (max < GROUP_MAX_EPS_IQ2_S) {
  4309. scales[ib] = 0;
  4310. continue;
  4311. }
  4312. float best = 0;
  4313. float scale = max/(2*kMaxQ-1);
  4314. is_on_grid[0] = is_on_grid[1] = true;
  4315. for (int is = -9; is <= 9; ++is) {
  4316. float id = (2*kMaxQ-1+is*0.1f)/max;
  4317. float this_scale = 1/id;
  4318. for (int k = 0; k < 2; ++k) {
  4319. for (int i = 0; i < 8; ++i) {
  4320. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  4321. Laux[8*k+i] = MAX(0, MIN(kMaxQ-1, l));
  4322. }
  4323. uint16_t u = 0;
  4324. for (int i = 0; i < 8; ++i) u |= (Laux[8*k+i] << 2*i);
  4325. int grid_index = kmap_q2xs[u];
  4326. is_on_grid_aux[k] = true;
  4327. if (grid_index < 0) {
  4328. is_on_grid_aux[k] = false;
  4329. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  4330. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, this_scale, Laux + 8*k);
  4331. }
  4332. }
  4333. float sumqx = 0, sumq2 = 0;
  4334. for (int i = 0; i < 16; ++i) {
  4335. float w = weight[i];
  4336. float q = 2*Laux[i] + 1;
  4337. sumqx += w*xval[i]*q;
  4338. sumq2 += w*q*q;
  4339. }
  4340. if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
  4341. scale = sumqx/sumq2; best = scale*sumqx;
  4342. for (int i = 0; i < 16; ++i) L[i] = Laux[i];
  4343. for (int k = 0; k < 2; ++k) is_on_grid[k] = is_on_grid_aux[k];
  4344. }
  4345. }
  4346. int n_not_ongrid = 0;
  4347. for (int k = 0; k < 2; ++k) if (!is_on_grid[k]) ++n_not_ongrid;
  4348. if (n_not_ongrid > 0 && scale > 0) {
  4349. float id = 1/scale;
  4350. for (int k = 0; k < 2; ++k) {
  4351. if (is_on_grid[k]) continue;
  4352. uint16_t u = 0;
  4353. for (int i = 0; i < 8; ++i) {
  4354. int l = nearest_int(0.5f*(id*xval[8*k+i]-1));
  4355. l = MAX(0, MIN(kMaxQ-1, l));
  4356. u |= (l << 2*i);
  4357. L[8*k + i] = l;
  4358. }
  4359. int grid_index = kmap_q2xs[u];
  4360. if (grid_index < 0) {
  4361. const uint16_t * neighbours = kneighbors_q2xs - kmap_q2xs[u] - 1;
  4362. grid_index = iq2_find_best_neighbour(neighbours, kgrid_q2xs, xval + 8*k, waux + 8*k, scale, L + 8*k);
  4363. }
  4364. }
  4365. float sumqx = 0, sumq2 = 0;
  4366. for (int i = 0; i < 16; ++i) {
  4367. float w = weight[i];
  4368. float q = 2*L[i] + 1;
  4369. sumqx += w*xval[i]*q;
  4370. sumq2 += w*q*q;
  4371. }
  4372. if (sumq2 > 0) scale = sumqx/sumq2;
  4373. }
  4374. if (scale < 0) {
  4375. scale = -scale;
  4376. for (int k = 0; k < 2; ++k) block_signs[k] = ~block_signs[k];
  4377. }
  4378. for (int k = 0; k < 2; ++k) {
  4379. uint16_t u = 0;
  4380. for (int i = 0; i < 8; ++i) u |= (L[8*k+i] << 2*i);
  4381. int grid_index = kmap_q2xs[u];
  4382. if (grid_index < 0) {
  4383. printf("Oops: found point %u not on grid:", u);
  4384. for (int i = 0; i < 8; ++i) printf(" %d", L[8*k+i]);
  4385. printf("\n");
  4386. GGML_ABORT("fatal error");
  4387. }
  4388. const int i8 = 2*ib + k;
  4389. y[ibl].qs[i8] = grid_index & 255;
  4390. y[ibl].qh[i8/4] |= ((grid_index >> 8) << 2*(i8%4));
  4391. y[ibl].qs[QK_K/8 + i8] = block_signs[k];
  4392. }
  4393. GGML_ASSERT(scale >= 0);
  4394. scales[ib] = scale;
  4395. max_scale = MAX(max_scale, scale);
  4396. }
  4397. if (!max_scale) {
  4398. continue;
  4399. }
  4400. float d = max_scale/31;
  4401. y[ibl].d = GGML_FP32_TO_FP16(d * 0.9875f);
  4402. float id = 1/d;
  4403. for (int ib = 0; ib < QK_K/16; ++ib) {
  4404. int l = nearest_int(0.5f*(id*scales[ib]-1));
  4405. l = MAX(0, MIN(15, l));
  4406. if (ib%2 == 0) y[ibl].scales[ib/2] = l;
  4407. else y[ibl].scales[ib/2] |= (l << 4);
  4408. }
  4409. }
  4410. }
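// Packing recap for IQ2_S, per 16-value sub-block split into two groups of 8 (i8 = 2*ib + k):
// qs[i8] holds the low 8 bits of the grid index, qh[i8/4] holds its high 2 bits at bit
// position 2*(i8%4), the 8 sign bits of the group go to qs[QK_K/8 + i8], and the 4-bit
// block scales are packed two per byte in y[ibl].scales.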
  4411. size_t quantize_iq2_s(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
  4412. GGML_ASSERT(n_per_row%QK_K == 0);
  4413. int64_t nblock = n_per_row/QK_K;
  4414. char * qrow = (char *)dst;
  4415. for (int64_t row = 0; row < nrow; ++row) {
  4416. quantize_row_iq2_s_impl(src, qrow, n_per_row, quant_weights);
  4417. src += n_per_row;
  4418. qrow += nblock*sizeof(block_iq2_s);
  4419. }
  4420. return nrow * nblock * sizeof(block_iq2_s);
  4421. }
  4422. void quantize_row_iq2_s_ref(const float * restrict x, block_iq2_s * restrict y, int64_t k) {
  4423. assert(k % QK_K == 0);
  4424. quantize_iq2_s(x, y, 1, k, NULL);
  4425. }
  4426. // =============================== data validation
  4427. static bool validate_float(float f, size_t i) {
  4428. if (isinf(f)) {
  4429. fprintf(stderr, "ggml_validate_row_data: found inf value at block %zu\n", i);
  4430. return false;
  4431. }
  4432. if (isnan(f)) {
  4433. fprintf(stderr, "ggml_validate_row_data: found nan value at block %zu\n", i);
  4434. return false;
  4435. }
  4436. return true;
  4437. }
  4438. static bool isinf_fp16(ggml_fp16_t f) {
  4439. return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) == 0;
  4440. }
  4441. static bool isnan_fp16(ggml_fp16_t f) {
  4442. return (f & 0x7c00) == 0x7c00 && (f & 0x03ff) != 0;
  4443. }
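// ggml_fp16_t is handled here as the raw IEEE-754 binary16 bit pattern:
//     bit 15: sign | bits 14..10: exponent (mask 0x7c00) | bits 9..0: mantissa (mask 0x03ff)
// An all-ones exponent marks the specials: zero mantissa is +/-inf, non-zero mantissa is NaN,
// which is what isinf_fp16()/isnan_fp16() above test. For example 0x7c00 is +inf, 0xfc00 is
// -inf and 0x7e00 is a (quiet) NaN.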
  4444. static bool validate_fp16(ggml_fp16_t f, size_t i) {
  4445. if (isinf_fp16(f)) {
  4446. fprintf(stderr, "ggml_validate_row_data: found inf value at block %zu\n", i);
  4447. return false;
  4448. }
  4449. if (isnan_fp16(f)) {
  4450. fprintf(stderr, "ggml_validate_row_data: found nan value at block %zu\n", i);
  4451. return false;
  4452. }
  4453. return true;
  4454. }
  4455. #define VALIDATE_ROW_DATA_D_F16_IMPL(type, data, nb) \
  4456. const type * q = (const type *) (data); \
  4457. for (size_t i = 0; i < (nb); ++i) { \
  4458. if (!validate_fp16(q[i].d, i)) { \
  4459. return false; \
  4460. } \
  4461. }
  4462. #define VALIDATE_ROW_DATA_DM_F16_IMPL(type, data, nb, d, m) \
  4463. const type * q = (const type *) (data); \
  4464. for (size_t i = 0; i < (nb); ++i) { \
  4465. if (!validate_fp16(q[i].d, i) || !validate_fp16(q[i].m, i)) { \
  4466. return false; \
  4467. } \
  4468. }
  4469. #define VALIDATE_ROW_DATA_DVEC_F16_IMPL(type, data, nb, nr) \
  4470. const type * q = (const type *) (data); \
  4471. for (size_t i = 0; i < (nb); ++i) { \
  4472. for (size_t j = 0; j < (nr); ++j) { \
  4473. if (!validate_fp16(q[i].d[j], i)) { \
  4474. return false; \
  4475. } \
  4476. } \
  4477. }
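// The trailing d/m parameters of VALIDATE_ROW_DATA_DM_F16_IMPL name the struct fields to
// check, so e.g. VALIDATE_ROW_DATA_DM_F16_IMPL(block_q2_K, data, nb, d, dmin) expands to:
//
//     const block_q2_K * q = (const block_q2_K *) (data);
//     for (size_t i = 0; i < (nb); ++i) {
//         if (!validate_fp16(q[i].d, i) || !validate_fp16(q[i].dmin, i)) {
//             return false;
//         }
//     }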
  4478. bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes) {
  4479. if (type < 0 || type >= GGML_TYPE_COUNT) {
  4480. fprintf(stderr, "%s: invalid type %d\n", __func__, type);
  4481. return false;
  4482. }
  4483. if (nbytes % ggml_type_size(type) != 0) {
  4484. fprintf(stderr, "%s: invalid size %zu for type %s (type size = %zu)\n", __func__, nbytes, ggml_type_name(type), ggml_type_size(type));
  4485. return false;
  4486. }
  4487. const size_t nb = nbytes/ggml_type_size(type);
  4488. switch (type) {
  4489. case GGML_TYPE_BF16:
  4490. {
  4491. int nans = 0;
  4492. int infs = 0;
  4493. const unsigned short * f = (const unsigned short *) data;
  4494. for (size_t i = 0; i < nb; ++i) {
  4495. nans += (f[i] & 0x7fff) > 0x7f80;
  4496. infs += (f[i] & 0x7fff) == 0x7f80;
  4497. }
  4498. if (nans) {
  4499. fprintf(stderr, "%s: found %d NaNs in row of %zu BF16 values\n", __func__, nans, nb);
  4500. return false;
  4501. }
  4502. if (infs) {
  4503. fprintf(stderr, "%s: found %d infinities in row of %zu BF16 values\n", __func__, infs, nb);
  4504. return false;
  4505. }
  4506. } break;
  4507. case GGML_TYPE_F16:
  4508. {
  4509. const ggml_fp16_t * f = (const ggml_fp16_t *) data;
  4510. size_t i = 0;
  4511. #if defined(__AVX2__)
  4512. for (; i + 15 < nb; i += 16) {
  4513. __m256i v = _mm256_loadu_si256((const __m256i *)(f + i));
  4514. __m256i vexp = _mm256_and_si256(v, _mm256_set1_epi16(0x7c00));
  4515. __m256i cmp = _mm256_cmpeq_epi16(vexp, _mm256_set1_epi16(0x7c00));
  4516. int mask = _mm256_movemask_epi8(cmp);
  4517. if (mask) {
  4518. for (size_t j = 0; j < 16; ++j) {
  4519. if (!validate_fp16(f[i + j], i + j)) {
  4520. return false;
  4521. }
  4522. }
  4523. GGML_UNREACHABLE();
  4524. }
  4525. }
  4526. #elif defined(__ARM_NEON)
  4527. for (; i + 7 < nb; i += 8) {
  4528. uint16x8_t v = vld1q_u16(f + i);
  4529. uint16x8_t vexp = vandq_u16(v, vdupq_n_u16(0x7c00));
  4530. uint16x8_t cmp = vceqq_u16(vexp, vdupq_n_u16(0x7c00));
  4531. uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(vshrn_n_u16(cmp, 4)), 0);
  4532. if (mask) {
  4533. for (size_t j = 0; j < 8; ++j) {
  4534. if (!validate_fp16(f[i + j], i + j)) {
  4535. return false;
  4536. }
  4537. }
  4538. GGML_UNREACHABLE();
  4539. }
  4540. }
  4541. #endif
  4542. for (; i < nb; ++i) {
  4543. if (!validate_fp16(f[i], i)) {
  4544. return false;
  4545. }
  4546. }
  4547. } break;
  4548. case GGML_TYPE_F32:
  4549. {
  4550. const float * f = (const float *) data;
  4551. size_t i = 0;
  4552. #if defined(__AVX2__)
  4553. for (; i + 7 < nb; i += 8) {
  4554. __m256i v = _mm256_loadu_si256((const __m256i *)(f + i));
  4555. __m256i vexp = _mm256_and_si256(v, _mm256_set1_epi32(0x7f800000));
  4556. __m256i cmp = _mm256_cmpeq_epi32(vexp, _mm256_set1_epi32(0x7f800000));
  4557. int mask = _mm256_movemask_epi8(cmp);
  4558. if (mask) {
  4559. for (size_t j = 0; j < 8; ++j) {
  4560. if (!validate_float(f[i + j], i + j)) {
  4561. return false;
  4562. }
  4563. }
  4564. GGML_UNREACHABLE();
  4565. }
  4566. }
  4567. #elif defined(__ARM_NEON)
  4568. for (; i + 3 < nb; i += 4) {
  4569. uint32x4_t v = vld1q_u32((const uint32_t *)f + i);
  4570. uint32x4_t vexp = vandq_u32(v, vdupq_n_u32(0x7f800000));
  4571. uint32x4_t cmp = vceqq_u32(vexp, vdupq_n_u32(0x7f800000));
  4572. uint64_t mask = vget_lane_u64(vreinterpret_u64_u16(vshrn_n_u32(cmp, 8)), 0);
  4573. if (mask) {
  4574. for (size_t j = 0; j < 4; ++j) {
  4575. if (!validate_float(f[i + j], i + j)) {
  4576. return false;
  4577. }
  4578. }
  4579. GGML_UNREACHABLE();
  4580. }
  4581. }
  4582. #endif
  4583. for (; i < nb; ++i) {
  4584. if (!validate_float(f[i], i)) {
  4585. return false;
  4586. }
  4587. }
  4588. } break;
  4589. case GGML_TYPE_F64:
  4590. {
  4591. const double * f = (const double *) data;
  4592. for (size_t i = 0; i < nb; ++i) {
  4593. if (!validate_float(f[i], i)) {
  4594. return false;
  4595. }
  4596. }
  4597. } break;
  4598. case GGML_TYPE_Q4_0:
  4599. {
  4600. VALIDATE_ROW_DATA_D_F16_IMPL(block_q4_0, data, nb);
  4601. } break;
  4602. case GGML_TYPE_Q4_1:
  4603. {
  4604. VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_1, data, nb, d, m);
  4605. } break;
  4606. case GGML_TYPE_Q5_0:
  4607. {
  4608. VALIDATE_ROW_DATA_D_F16_IMPL(block_q5_0, data, nb);
  4609. } break;
  4610. case GGML_TYPE_Q5_1:
  4611. {
  4612. VALIDATE_ROW_DATA_DM_F16_IMPL(block_q5_1, data, nb, d, m);
  4613. } break;
  4614. case GGML_TYPE_Q8_0:
  4615. {
  4616. VALIDATE_ROW_DATA_D_F16_IMPL(block_q8_0, data, nb);
  4617. } break;
  4618. case GGML_TYPE_Q2_K:
  4619. {
  4620. VALIDATE_ROW_DATA_DM_F16_IMPL(block_q2_K, data, nb, d, dmin);
  4621. } break;
  4622. case GGML_TYPE_Q3_K:
  4623. {
  4624. VALIDATE_ROW_DATA_D_F16_IMPL(block_q3_K, data, nb);
  4625. } break;
  4626. case GGML_TYPE_Q4_K:
  4627. {
  4628. VALIDATE_ROW_DATA_DM_F16_IMPL(block_q4_K, data, nb, d, dmin);
  4629. } break;
  4630. case GGML_TYPE_Q5_K:
  4631. {
  4632. VALIDATE_ROW_DATA_DM_F16_IMPL(block_q5_K, data, nb, d, dmin);
  4633. } break;
  4634. case GGML_TYPE_Q6_K:
  4635. {
  4636. VALIDATE_ROW_DATA_D_F16_IMPL(block_q6_K, data, nb);
  4637. } break;
  4638. case GGML_TYPE_Q8_K:
  4639. {
  4640. const block_q8_K * q = (const block_q8_K *) data;
  4641. for (size_t i = 0; i < nb; ++i) {
  4642. if (!validate_float(q[i].d, i)) {
  4643. return false;
  4644. }
  4645. }
  4646. } break;
  4647. case GGML_TYPE_TQ1_0:
  4648. {
  4649. VALIDATE_ROW_DATA_D_F16_IMPL(block_tq1_0, data, nb);
  4650. } break;
  4651. case GGML_TYPE_TQ2_0:
  4652. {
  4653. VALIDATE_ROW_DATA_D_F16_IMPL(block_tq2_0, data, nb);
  4654. } break;
  4655. case GGML_TYPE_IQ1_S:
  4656. {
  4657. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq1_s, data, nb);
  4658. } break;
  4659. case GGML_TYPE_IQ1_M:
  4660. {
  4661. const block_iq1_m * q = (const block_iq1_m *) data;
  4662. for (size_t i = 0; i < nb; ++i) {
  4663. iq1m_scale_t scale;
  4664. const uint16_t * sc = (const uint16_t *)q[i].scales;
  4665. scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
  4666. if (!validate_fp16(scale.f16, i)) {
  4667. return false;
  4668. }
  4669. }
  4670. } break;
  4671. case GGML_TYPE_IQ2_XXS:
  4672. {
  4673. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_xxs, data, nb);
  4674. } break;
  4675. case GGML_TYPE_IQ2_XS:
  4676. {
  4677. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_xs, data, nb);
  4678. } break;
  4679. case GGML_TYPE_IQ2_S:
  4680. {
  4681. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq2_s, data, nb);
  4682. } break;
  4683. case GGML_TYPE_IQ3_XXS:
  4684. {
  4685. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq3_xxs, data, nb);
  4686. } break;
  4687. case GGML_TYPE_IQ3_S:
  4688. {
  4689. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq3_s, data, nb);
  4690. } break;
  4691. case GGML_TYPE_IQ4_XS:
  4692. {
  4693. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_xs, data, nb);
  4694. } break;
  4695. case GGML_TYPE_IQ4_NL:
  4696. {
  4697. VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_nl, data, nb);
  4698. } break;
  4699. case GGML_TYPE_Q4_0_4_4:
  4700. case GGML_TYPE_Q4_0_4_8:
  4701. {
  4702. VALIDATE_ROW_DATA_DVEC_F16_IMPL(block_q4_0x4, data, nbytes / sizeof(block_q4_0x4), 4);
  4703. } break;
  4704. case GGML_TYPE_Q4_0_8_8:
  4705. {
  4706. VALIDATE_ROW_DATA_DVEC_F16_IMPL(block_q4_0x8, data, nbytes / sizeof(block_q4_0x8), 8);
  4707. } break;
  4708. case GGML_TYPE_I8:
  4709. case GGML_TYPE_I16:
  4710. case GGML_TYPE_I32:
  4711. case GGML_TYPE_I64:
  4712. // nothing to validate
  4713. break;
  4714. default:
  4715. {
  4716. fprintf(stderr, "%s: invalid type %d\n", __func__, type);
  4717. return false;
  4718. }
  4719. }
  4720. return true;
  4721. }
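// Typical use: validate each row (any whole number of blocks) of quantized data before it is
// consumed, e.g. when loading tensors from an untrusted file. A minimal sketch (row/loop
// names are illustrative; ggml_row_size() gives the per-row byte count):
//
//     const size_t row_bytes = ggml_row_size(type, n_per_row);
//     for (int64_t r = 0; r < nrows; ++r) {
//         if (!ggml_validate_row_data(type, (const char *)data + r*row_bytes, row_bytes)) {
//             return false;   // reject the tensor
//         }
//     }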