- #include "mmq.cuh"
- #include "vecdotq.cuh"
- typedef void (*allocate_tiles_cuda_t)(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc);
- typedef void (*load_tiles_cuda_t)(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row);
- typedef float (*vec_dot_q_mul_mat_cuda_t)(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ms, const int & i, const int & j, const int & k);
- typedef void (*dot_kernel_k_t)(const void * __restrict__ vx, const int ib, const int iqs, const float * __restrict__ y, float & v);
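- // These function-pointer types parameterize the generic mul_mat_q kernel further below: allocate_tiles_*
- // reserves the shared-memory tiles, load_tiles_* fills them from global memory for one quantization format,
- // and vec_dot_*_mul_mat computes one partial dot product between an x tile row and a y (q8_1) tile column.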
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- GGML_UNUSED(x_qh);
- GGML_UNUSED(x_sc);
- __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y];
- __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI4_0) + mmq_y/QI4_0];
- *x_ql = tile_x_qs;
- *x_dm = (half2 *) tile_x_d;
- }
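- // The extra "+ mmq_y" pads each tile row by one int; the indexing below uses a stride of WARP_SIZE + 1,
- // a common trick to avoid shared-memory bank conflicts.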
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_0(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
- GGML_CUDA_ASSUME(i_offset >= 0);
- GGML_CUDA_ASSUME(i_offset < nwarps);
- GGML_CUDA_ASSUME(k >= 0);
- GGML_CUDA_ASSUME(k < WARP_SIZE);
- const int kbx = k / QI4_0;
- const int kqsx = k % QI4_0;
- const block_q4_0 * bx0 = (const block_q4_0 *) vx;
- float * x_dmf = (float *) x_dm;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbx;
- x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx);
- // x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbx] = bxi->d;
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI4_0;
- const int kbxd = k % blocks_per_tile_x_row;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_0) {
- int i = i0 + i_offset * QI4_0 + k / blocks_per_tile_x_row;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbxd] = bxi->d;
- }
- }
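- // The first loop loads one packed quant int per thread and covered tile row; the second loop loads the
- // per-block d scales, spreading the warp's threads across WARP_SIZE/QI4_0 scales per row and several rows.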
- static __device__ __forceinline__ float vec_dot_q4_0_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
- const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
- const float * x_dmf = (const float *) x_dm;
- int u[2*VDR_Q4_0_Q8_1_MMQ];
- #pragma unroll
- for (int l = 0; l < VDR_Q4_0_Q8_1_MMQ; ++l) {
- u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
- u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_0) % WARP_SIZE];
- }
- return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMQ>
- (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dmf[i * (WARP_SIZE/QI4_0) + i/QI4_0 + k/QI4_0],
- y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
- }
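- // kyqs maps k to the matching q8_1 positions: the low and high nibbles packed into one q4_0 quant int
- // correspond to y values QI4_0 quant ints apart, so u[] gathers both halves before calling the shared impl.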
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
- __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y];
- __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_1) + mmq_y/QI4_1];
- *x_ql = tile_x_qs;
- *x_dm = tile_x_dm;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_1(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
- GGML_CUDA_ASSUME(i_offset >= 0);
- GGML_CUDA_ASSUME(i_offset < nwarps);
- GGML_CUDA_ASSUME(k >= 0);
- GGML_CUDA_ASSUME(k < WARP_SIZE);
- const int kbx = k / QI4_1;
- const int kqsx = k % QI4_1;
- const block_q4_1 * bx0 = (const block_q4_1 *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbx;
- x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI4_1;
- const int kbxd = k % blocks_per_tile_x_row;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_1) {
- int i = i0 + i_offset * QI4_1 + k / blocks_per_tile_x_row;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dm[i * (WARP_SIZE/QI4_1) + i / QI4_1 + kbxd] = bxi->dm;
- }
- }
- static __device__ __forceinline__ float vec_dot_q4_1_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
- const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
- int u[2*VDR_Q4_1_Q8_1_MMQ];
- #pragma unroll
- for (int l = 0; l < VDR_Q4_1_Q8_1_MMQ; ++l) {
- u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
- u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_1) % WARP_SIZE];
- }
- return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMQ>
- (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dm[i * (WARP_SIZE/QI4_1) + i/QI4_1 + k/QI4_1],
- y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
- }
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
- __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
- __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI5_0) + mmq_y/QI5_0];
- *x_ql = tile_x_ql;
- *x_dm = (half2 *) tile_x_d;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_0(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
- GGML_CUDA_ASSUME(i_offset >= 0);
- GGML_CUDA_ASSUME(i_offset < nwarps);
- GGML_CUDA_ASSUME(k >= 0);
- GGML_CUDA_ASSUME(k < WARP_SIZE);
- const int kbx = k / QI5_0;
- const int kqsx = k % QI5_0;
- const block_q5_0 * bx0 = (const block_q5_0 *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbx;
- const int ql = get_int_from_uint8(bxi->qs, kqsx);
- const int qh = get_int_from_uint8(bxi->qh, 0) >> (4 * (k % QI5_0));
- int qs0 = (ql >> 0) & 0x0F0F0F0F;
- qs0 |= (qh << 4) & 0x00000010; // 0 -> 4
- qs0 |= (qh << 11) & 0x00001000; // 1 -> 12
- qs0 |= (qh << 18) & 0x00100000; // 2 -> 20
- qs0 |= (qh << 25) & 0x10000000; // 3 -> 28
- qs0 = __vsubss4(qs0, 0x10101010); // subtract 16
- x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0;
- int qs1 = (ql >> 4) & 0x0F0F0F0F;
- qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4
- qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12
- qs1 |= (qh << 2) & 0x00100000; // 18 -> 20
- qs1 |= (qh << 9) & 0x10000000; // 19 -> 28
- qs1 = __vsubss4(qs1, 0x10101010); // subtract 16
- x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1;
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI5_0;
- const int kbxd = k % blocks_per_tile_x_row;
- float * x_dmf = (float *) x_dm;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_0) {
- int i = i0 + i_offset * QI5_0 + k / blocks_per_tile_x_row;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dmf[i * (WARP_SIZE/QI5_0) + i / QI5_0 + kbxd] = bxi->d;
- }
- }
- static __device__ __forceinline__ float vec_dot_q5_0_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
- const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
- const int index_bx = i * (WARP_SIZE/QI5_0) + i/QI5_0 + k/QI5_0;
- const float * x_dmf = (const float *) x_dm;
- const float * y_df = (const float *) y_ds;
- int u[2*VDR_Q5_0_Q8_1_MMQ];
- #pragma unroll
- for (int l = 0; l < VDR_Q5_0_Q8_1_MMQ; ++l) {
- u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
- u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_0) % WARP_SIZE];
- }
- return vec_dot_q8_0_q8_1_impl<QR5_0*VDR_Q5_0_Q8_1_MMQ>
- (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
- }
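- // Since load_tiles_q5_0 already merges the high bits into full 8-bit values and subtracts the offset of 16,
- // the dot product can reuse vec_dot_q8_0_q8_1_impl, just with QR5_0 times as many quant ints per call.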
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
- __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
- __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_1) + mmq_y/QI5_1];
- *x_ql = tile_x_ql;
- *x_dm = tile_x_dm;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_1(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
- GGML_CUDA_ASSUME(i_offset >= 0);
- GGML_CUDA_ASSUME(i_offset < nwarps);
- GGML_CUDA_ASSUME(k >= 0);
- GGML_CUDA_ASSUME(k < WARP_SIZE);
- const int kbx = k / QI5_1;
- const int kqsx = k % QI5_1;
- const block_q5_1 * bx0 = (const block_q5_1 *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbx;
- const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
- const int qh = get_int_from_uint8_aligned(bxi->qh, 0) >> (4 * (k % QI5_1));
- int qs0 = (ql >> 0) & 0x0F0F0F0F;
- qs0 |= (qh << 4) & 0x00000010; // 0 -> 4
- qs0 |= (qh << 11) & 0x00001000; // 1 -> 12
- qs0 |= (qh << 18) & 0x00100000; // 2 -> 20
- qs0 |= (qh << 25) & 0x10000000; // 3 -> 28
- x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0;
- int qs1 = (ql >> 4) & 0x0F0F0F0F;
- qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4
- qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12
- qs1 |= (qh << 2) & 0x00100000; // 18 -> 20
- qs1 |= (qh << 9) & 0x10000000; // 19 -> 28
- x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1;
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI5_1;
- const int kbxd = k % blocks_per_tile_x_row;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_1) {
- int i = i0 + i_offset * QI5_1 + k / blocks_per_tile_x_row;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dm[i * (WARP_SIZE/QI5_1) + i / QI5_1 + kbxd] = bxi->dm;
- }
- }
- static __device__ __forceinline__ float vec_dot_q5_1_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
- const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
- const int index_bx = i * (WARP_SIZE/QI5_1) + i/QI5_1 + k/QI5_1;
- int u[2*VDR_Q5_1_Q8_1_MMQ];
- #pragma unroll
- for (int l = 0; l < VDR_Q5_1_Q8_1_MMQ; ++l) {
- u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
- u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_1) % WARP_SIZE];
- }
- return vec_dot_q8_1_q8_1_impl<QR5_1*VDR_Q5_1_Q8_1_MMQ>
- (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dm[index_bx], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
- }
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q8_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
- __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y];
- __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI8_0) + mmq_y/QI8_0];
- *x_ql = tile_x_qs;
- *x_dm = (half2 *) tile_x_d;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q8_0(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
- GGML_CUDA_ASSUME(i_offset >= 0);
- GGML_CUDA_ASSUME(i_offset < nwarps);
- GGML_CUDA_ASSUME(k >= 0);
- GGML_CUDA_ASSUME(k < WARP_SIZE);
- const int kbx = k / QI8_0;
- const int kqsx = k % QI8_0;
- float * x_dmf = (float *) x_dm;
- const block_q8_0 * bx0 = (const block_q8_0 *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbx;
- x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_int8(bxi->qs, kqsx);
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI8_0;
- const int kbxd = k % blocks_per_tile_x_row;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI8_0) {
- int i = i0 + i_offset * QI8_0 + k / blocks_per_tile_x_row;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dmf[i * (WARP_SIZE/QI8_0) + i / QI8_0 + kbxd] = bxi->d;
- }
- }
- static __device__ __forceinline__ float vec_dot_q8_0_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
- const float * x_dmf = (const float *) x_dm;
- const float * y_df = (const float *) y_ds;
- return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMQ>
- (&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[j * WARP_SIZE + k], x_dmf[i * (WARP_SIZE/QI8_0) + i/QI8_0 + k/QI8_0],
- y_df[j * (WARP_SIZE/QI8_1) + k/QI8_1]);
- }
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q2_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- GGML_UNUSED(x_qh);
- __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y];
- __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI2_K) + mmq_y/QI2_K];
- __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4];
- *x_ql = tile_x_ql;
- *x_dm = tile_x_dm;
- *x_sc = tile_x_sc;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q2_K(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- GGML_UNUSED(x_qh);
- GGML_CUDA_ASSUME(i_offset >= 0);
- GGML_CUDA_ASSUME(i_offset < nwarps);
- GGML_CUDA_ASSUME(k >= 0);
- GGML_CUDA_ASSUME(k < WARP_SIZE);
- const int kbx = k / QI2_K;
- const int kqsx = k % QI2_K;
- const block_q2_K * bx0 = (const block_q2_K *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q2_K * bxi = bx0 + i*blocks_per_row + kbx;
- x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI2_K;
- const int kbxd = k % blocks_per_tile_x_row;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI2_K) {
- int i = (i0 + i_offset * QI2_K + k / blocks_per_tile_x_row) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q2_K * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dm[i * (WARP_SIZE/QI2_K) + i / QI2_K + kbxd] = bxi->dm;
- }
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
- int i = i0 + i_offset * 4 + k / (WARP_SIZE/4);
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q2_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI2_K/4);
- x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = get_int_from_uint8_aligned(bxi->scales, k % (QI2_K/4));
- }
- }
- static __device__ __forceinline__ float vec_dot_q2_K_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- GGML_UNUSED(x_qh);
- const int kbx = k / QI2_K;
- const int ky = (k % QI2_K) * QR2_K;
- const float * y_df = (const float *) y_ds;
- int v[QR2_K*VDR_Q2_K_Q8_1_MMQ];
- const int kqsx = i * (WARP_SIZE + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2);
- const int shift = 2 * ((ky % (2*QI2_K)) / (QI2_K/2));
- #pragma unroll
- for (int l = 0; l < QR2_K*VDR_Q2_K_Q8_1_MMQ; ++l) {
- v[l] = (x_ql[kqsx + l] >> shift) & 0x03030303;
- }
- const uint8_t * scales = ((const uint8_t *) &x_sc[i * (WARP_SIZE/4) + i/4 + kbx*4]) + ky/4;
- const int index_y = j * WARP_SIZE + (QR2_K*k) % WARP_SIZE;
- return vec_dot_q2_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dm[i * (WARP_SIZE/QI2_K) + i/QI2_K + kbx], y_df[index_y/QI8_1]);
- }
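- // Each q2_K scale byte packs a 4-bit scale (low nibble) and a 4-bit min (high nibble); the impl combines
- // them with the block's half2 dm = {d, dmin} passed above.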
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q3_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y];
- __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI3_K) + mmq_y/QI3_K];
- __shared__ int tile_x_qh[mmq_y * (WARP_SIZE/2) + mmq_y/2];
- __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4];
- *x_ql = tile_x_ql;
- *x_dm = tile_x_dm;
- *x_qh = tile_x_qh;
- *x_sc = tile_x_sc;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q3_K(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- GGML_CUDA_ASSUME(i_offset >= 0);
- GGML_CUDA_ASSUME(i_offset < nwarps);
- GGML_CUDA_ASSUME(k >= 0);
- GGML_CUDA_ASSUME(k < WARP_SIZE);
- const int kbx = k / QI3_K;
- const int kqsx = k % QI3_K;
- const block_q3_K * bx0 = (const block_q3_K *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q3_K * bxi = bx0 + i*blocks_per_row + kbx;
- x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx);
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI3_K;
- const int kbxd = k % blocks_per_tile_x_row;
- float * x_dmf = (float *) x_dm;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI3_K) {
- int i = (i0 + i_offset * QI3_K + k / blocks_per_tile_x_row) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q3_K * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dmf[i * (WARP_SIZE/QI3_K) + i / QI3_K + kbxd] = bxi->d;
- }
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 2) {
- int i = i0 + i_offset * 2 + k / (WARP_SIZE/2);
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/2)) / (QI3_K/2);
- // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted
- x_qh[i * (WARP_SIZE/2) + i / 2 + k % (WARP_SIZE/2)] = ~get_int_from_uint8(bxi->hmask, k % (QI3_K/2));
- }
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
- int i = i0 + i_offset * 4 + k / (WARP_SIZE/4);
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI3_K/4);
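- // Reconstruct the 6-bit q3_K scales: the 4 low bits come from the leading bytes of bxi->scales, the 2 high
- // bits from the trailing bytes; after recombining, 32 is subtracted so the scales are centered around zero.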
- const int ksc = k % (QI3_K/4);
- const int ksc_low = ksc % (QI3_K/8);
- const int shift_low = 4 * (ksc / (QI3_K/8));
- const int sc_low = (get_int_from_uint8(bxi->scales, ksc_low) >> shift_low) & 0x0F0F0F0F;
- const int ksc_high = QI3_K/8;
- const int shift_high = 2 * ksc;
- const int sc_high = ((get_int_from_uint8(bxi->scales, ksc_high) >> shift_high) << 4) & 0x30303030;
- const int sc = __vsubss4(sc_low | sc_high, 0x20202020);
- x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = sc;
- }
- }
- static __device__ __forceinline__ float vec_dot_q3_K_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- const int kbx = k / QI3_K;
- const int ky = (k % QI3_K) * QR3_K;
- const float * x_dmf = (const float *) x_dm;
- const float * y_df = (const float *) y_ds;
- const int8_t * scales = ((const int8_t *) (x_sc + i * (WARP_SIZE/4) + i/4 + kbx*4)) + ky/4;
- int v[QR3_K*VDR_Q3_K_Q8_1_MMQ];
- #pragma unroll
- for (int l = 0; l < QR3_K*VDR_Q3_K_Q8_1_MMQ; ++l) {
- const int kqsx = i * (WARP_SIZE + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2);
- const int shift = 2 * ((ky % 32) / 8);
- const int vll = (x_ql[kqsx + l] >> shift) & 0x03030303;
- const int vh = x_qh[i * (WARP_SIZE/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8);
- const int vlh = (vh << 2) & 0x04040404;
- v[l] = __vsubss4(vll, vlh);
- }
- const int index_y = j * WARP_SIZE + (k*QR3_K) % WARP_SIZE;
- return vec_dot_q3_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dmf[i * (WARP_SIZE/QI3_K) + i/QI3_K + kbx], y_df[index_y/QI8_1]);
- }
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- GGML_UNUSED(x_qh);
- __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y];
- __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_K) + mmq_y/QI4_K];
- __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8];
- *x_ql = tile_x_ql;
- *x_dm = tile_x_dm;
- *x_sc = tile_x_sc;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_K(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- GGML_UNUSED(x_qh);
- GGML_CUDA_ASSUME(i_offset >= 0);
- GGML_CUDA_ASSUME(i_offset < nwarps);
- GGML_CUDA_ASSUME(k >= 0);
- GGML_CUDA_ASSUME(k < WARP_SIZE);
- const int kbx = k / QI4_K; // == 0 if QK_K == 256
- const int kqsx = k % QI4_K; // == k if QK_K == 256
- const block_q4_K * bx0 = (const block_q4_K *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q4_K * bxi = bx0 + i*blocks_per_row + kbx;
- x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI4_K; // == 1 if QK_K == 256
- const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_K) {
- int i = (i0 + i_offset * QI4_K + k / blocks_per_tile_x_row) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q4_K * bxi = bx0 + i*blocks_per_row + kbxd;
- #if QK_K == 256
- x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = bxi->dm;
- #else
- x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = {bxi->dm[0], bxi->dm[1]};
- #endif
- }
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
- int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q4_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI4_K/8);
- const int * scales = (const int *) bxi->scales;
- const int ksc = k % (WARP_SIZE/8);
- // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m7
- int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits
- scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits
- x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8;
- }
- }
- static __device__ __forceinline__ float vec_dot_q4_K_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- GGML_UNUSED(x_qh);
- const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2*((k % 16) / 8);
- const int index_y = j * WARP_SIZE + (QR4_K*k) % WARP_SIZE;
- return vec_dot_q4_K_q8_1_impl_mmq(&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[index_y], sc, sc+8,
- x_dm[i * (WARP_SIZE/QI4_K) + i/QI4_K], &y_ds[index_y/QI8_1]);
- }
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- GGML_UNUSED(x_qh);
- __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
- __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_K) + mmq_y/QI5_K];
- __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8];
- *x_ql = tile_x_ql;
- *x_dm = tile_x_dm;
- *x_sc = tile_x_sc;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_K(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- GGML_UNUSED(x_qh);
- GGML_CUDA_ASSUME(i_offset >= 0);
- GGML_CUDA_ASSUME(i_offset < nwarps);
- GGML_CUDA_ASSUME(k >= 0);
- GGML_CUDA_ASSUME(k < WARP_SIZE);
- const int kbx = k / QI5_K; // == 0 if QK_K == 256
- const int kqsx = k % QI5_K; // == k if QK_K == 256
- const block_q5_K * bx0 = (const block_q5_K *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q5_K * bxi = bx0 + i*blocks_per_row + kbx;
- const int ky = QR5_K*kqsx;
- const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
- const int ql0 = (ql >> 0) & 0x0F0F0F0F;
- const int ql1 = (ql >> 4) & 0x0F0F0F0F;
- const int qh = get_int_from_uint8_aligned(bxi->qh, kqsx % (QI5_K/4));
- const int qh0 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 0)) << 4) & 0x10101010;
- const int qh1 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 1)) << 4) & 0x10101010;
- const int kq0 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + 0;
- const int kq1 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + (QI5_K/4);
- x_ql[i * (2*WARP_SIZE + 1) + kq0] = ql0 | qh0;
- x_ql[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1;
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI5_K; // == 1 if QK_K == 256
- const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_K) {
- int i = (i0 + i_offset * QI5_K + k / blocks_per_tile_x_row) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q5_K * bxi = bx0 + i*blocks_per_row + kbxd;
- #if QK_K == 256
- x_dm[i * (WARP_SIZE/QI5_K) + i / QI5_K + kbxd] = bxi->dm;
- #endif
- }
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
- int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q5_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI5_K/8);
- const int * scales = (const int *) bxi->scales;
- const int ksc = k % (WARP_SIZE/8);
- // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m7
- int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits
- scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits
- x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8;
- }
- }
- static __device__ __forceinline__ float vec_dot_q5_K_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- GGML_UNUSED(x_qh);
- const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2 * ((k % 16) / 8);
- const int index_x = i * (QR5_K*WARP_SIZE + 1) + QR5_K*k;
- const int index_y = j * WARP_SIZE + (QR5_K*k) % WARP_SIZE;
- return vec_dot_q5_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, sc+8,
- x_dm[i * (WARP_SIZE/QI5_K) + i/QI5_K], &y_ds[index_y/QI8_1]);
- }
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q6_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- GGML_UNUSED(x_qh);
- __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
- __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI6_K) + mmq_y/QI6_K];
- __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8];
- *x_ql = tile_x_ql;
- *x_dm = tile_x_dm;
- *x_sc = tile_x_sc;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q6_K(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- GGML_UNUSED(x_qh);
- GGML_CUDA_ASSUME(i_offset >= 0);
- GGML_CUDA_ASSUME(i_offset < nwarps);
- GGML_CUDA_ASSUME(k >= 0);
- GGML_CUDA_ASSUME(k < WARP_SIZE);
- const int kbx = k / QI6_K; // == 0 if QK_K == 256
- const int kqsx = k % QI6_K; // == k if QK_K == 256
- const block_q6_K * bx0 = (const block_q6_K *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q6_K * bxi = bx0 + i*blocks_per_row + kbx;
- const int ky = QR6_K*kqsx;
- const int ql = get_int_from_uint8(bxi->ql, kqsx);
- const int ql0 = (ql >> 0) & 0x0F0F0F0F;
- const int ql1 = (ql >> 4) & 0x0F0F0F0F;
- const int qh = get_int_from_uint8(bxi->qh, (QI6_K/4) * (kqsx / (QI6_K/2)) + kqsx % (QI6_K/4));
- const int qh0 = ((qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) << 4) & 0x30303030;
- const int qh1 = (qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) & 0x30303030;
- const int kq0 = ky - ky % QI6_K + k % (QI6_K/2) + 0;
- const int kq1 = ky - ky % QI6_K + k % (QI6_K/2) + (QI6_K/2);
- x_ql[i * (2*WARP_SIZE + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020);
- x_ql[i * (2*WARP_SIZE + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020);
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256
- const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
- float * x_dmf = (float *) x_dm;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI6_K) {
- int i = (i0 + i_offset * QI6_K + k / blocks_per_tile_x_row) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q6_K * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dmf[i * (WARP_SIZE/QI6_K) + i / QI6_K + kbxd] = bxi->d;
- }
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
- int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q6_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / 4;
- x_sc[i * (WARP_SIZE/8) + i / 8 + k % (WARP_SIZE/8)] = get_int_from_int8(bxi->scales, k % (QI6_K/8));
- }
- }
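- // As with q5_0, the 6-bit values are expanded to signed bytes at load time (offset of 32 removed), so the
- // dot product below operates directly on full 8-bit quants.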
- static __device__ __forceinline__ float vec_dot_q6_K_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- GGML_UNUSED(x_qh);
- const float * x_dmf = (const float *) x_dm;
- const float * y_df = (const float *) y_ds;
- const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/8]);
- const int index_x = i * (QR6_K*WARP_SIZE + 1) + QR6_K*k;
- const int index_y = j * WARP_SIZE + (QR6_K*k) % WARP_SIZE;
- return vec_dot_q6_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, x_dmf[i * (WARP_SIZE/QI6_K) + i/QI6_K], &y_df[index_y/QI8_1]);
- }
- #define MMQ_X_Q4_0_RDNA2 64
- #define MMQ_Y_Q4_0_RDNA2 128
- #define NWARPS_Q4_0_RDNA2 8
- #define MMQ_X_Q4_0_RDNA1 64
- #define MMQ_Y_Q4_0_RDNA1 64
- #define NWARPS_Q4_0_RDNA1 8
- #if defined(CUDA_USE_TENSOR_CORES)
- #define MMQ_X_Q4_0_AMPERE 4
- #define MMQ_Y_Q4_0_AMPERE 32
- #define NWARPS_Q4_0_AMPERE 4
- #else
- #define MMQ_X_Q4_0_AMPERE 64
- #define MMQ_Y_Q4_0_AMPERE 128
- #define NWARPS_Q4_0_AMPERE 4
- #endif
- #define MMQ_X_Q4_0_PASCAL 64
- #define MMQ_Y_Q4_0_PASCAL 64
- #define NWARPS_Q4_0_PASCAL 8
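- // The MMQ_X_*/MMQ_Y_*/NWARPS_* macros pick the dst tile shape per target (RDNA2, RDNA1, Volta+ "AMPERE",
- // Pascal). With CUDA_USE_TENSOR_CORES the tiles are kept small, presumably because mul_mat_q is then only
- // used for small batches while larger ones go through the tensor-core path.
- // The generic kernel below computes an mmq_y x mmq_x tile of dst per thread block: it repeatedly loads an
- // x tile into shared memory via load_tiles, loads the matching q8_1 y tile, accumulates partial sums with
- // vec_dot, and finally writes the tile back with bounds checks on the dst rows and columns.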
- template <int qk, int qr, int qi, bool need_sum, typename block_q_t, int mmq_x, int mmq_y, int nwarps,
- allocate_tiles_cuda_t allocate_tiles, load_tiles_cuda_t load_tiles, int vdr, vec_dot_q_mul_mat_cuda_t vec_dot>
- static __device__ __forceinline__ void mul_mat_q(
- const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- const block_q_t * x = (const block_q_t *) vx;
- const block_q8_1 * y = (const block_q8_1 *) vy;
- const int blocks_per_row_x = ncols_x / qk;
- const int blocks_per_col_y = nrows_y / QK8_1;
- const int blocks_per_warp = WARP_SIZE / qi;
- const int & ncols_dst = ncols_y;
- const int row_dst_0 = blockIdx.x*mmq_y;
- const int & row_x_0 = row_dst_0;
- const int col_dst_0 = blockIdx.y*mmq_x;
- const int & col_y_0 = col_dst_0;
- int * tile_x_ql = nullptr;
- half2 * tile_x_dm = nullptr;
- int * tile_x_qh = nullptr;
- int * tile_x_sc = nullptr;
- allocate_tiles(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc);
- __shared__ int tile_y_qs[mmq_x * WARP_SIZE];
- __shared__ half2 tile_y_ds[mmq_x * WARP_SIZE/QI8_1];
- float sum[mmq_y/WARP_SIZE][mmq_x/nwarps] = {{0.0f}};
- for (int ib0 = 0; ib0 < blocks_per_row_x; ib0 += blocks_per_warp) {
- load_tiles(x + row_x_0*blocks_per_row_x + ib0, tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc,
- threadIdx.y, nrows_x-row_x_0-1, threadIdx.x, blocks_per_row_x);
- #pragma unroll
- for (int ir = 0; ir < qr; ++ir) {
- const int kqs = ir*WARP_SIZE + threadIdx.x;
- const int kbxd = kqs / QI8_1;
- #pragma unroll
- for (int i = 0; i < mmq_x; i += nwarps) {
- const int col_y_eff = min(col_y_0 + threadIdx.y + i, ncols_y-1); // to prevent out-of-bounds memory accesses
- const block_q8_1 * by0 = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + kbxd];
- const int index_y = (threadIdx.y + i) * WARP_SIZE + kqs % WARP_SIZE;
- tile_y_qs[index_y] = get_int_from_int8_aligned(by0->qs, threadIdx.x % QI8_1);
- }
- #pragma unroll
- for (int ids0 = 0; ids0 < mmq_x; ids0 += nwarps * QI8_1) {
- const int ids = (ids0 + threadIdx.y * QI8_1 + threadIdx.x / (WARP_SIZE/QI8_1)) % mmq_x;
- const int kby = threadIdx.x % (WARP_SIZE/QI8_1);
- const int col_y_eff = min(col_y_0 + ids, ncols_y-1);
- // if the sum is not needed it's faster to transform the scale to f32 ahead of time
- const half2 * dsi_src = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + ir*(WARP_SIZE/QI8_1) + kby].ds;
- half2 * dsi_dst = &tile_y_ds[ids * (WARP_SIZE/QI8_1) + kby];
- if (need_sum) {
- *dsi_dst = *dsi_src;
- } else {
- float * dfi_dst = (float *) dsi_dst;
- *dfi_dst = __low2float(*dsi_src);
- }
- }
- __syncthreads();
- // #pragma unroll // unrolling this loop causes too much register pressure
- for (int k = ir*WARP_SIZE/qr; k < (ir+1)*WARP_SIZE/qr; k += vdr) {
- #pragma unroll
- for (int j = 0; j < mmq_x; j += nwarps) {
- #pragma unroll
- for (int i = 0; i < mmq_y; i += WARP_SIZE) {
- sum[i/WARP_SIZE][j/nwarps] += vec_dot(
- tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, tile_y_qs, tile_y_ds,
- threadIdx.x + i, threadIdx.y + j, k);
- }
- }
- }
- __syncthreads();
- }
- }
- #pragma unroll
- for (int j = 0; j < mmq_x; j += nwarps) {
- const int col_dst = col_dst_0 + j + threadIdx.y;
- if (col_dst >= ncols_dst) {
- return;
- }
- #pragma unroll
- for (int i = 0; i < mmq_y; i += WARP_SIZE) {
- const int row_dst = row_dst_0 + threadIdx.x + i;
- if (row_dst >= nrows_dst) {
- continue;
- }
- dst[col_dst*nrows_dst + row_dst] = sum[i/WARP_SIZE][j/nwarps];
- }
- }
- }
- template <bool need_check> static __global__ void
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- __launch_bounds__(WARP_SIZE*NWARPS_Q4_0_RDNA2, 2)
- #endif // defined(RDNA3) || defined(RDNA2)
- #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- mul_mat_q4_0(
- const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- const int mmq_x = MMQ_X_Q4_0_RDNA2;
- const int mmq_y = MMQ_Y_Q4_0_RDNA2;
- const int nwarps = NWARPS_Q4_0_RDNA2;
- #else
- const int mmq_x = MMQ_X_Q4_0_RDNA1;
- const int mmq_y = MMQ_Y_Q4_0_RDNA1;
- const int nwarps = NWARPS_Q4_0_RDNA1;
- #endif // defined(RDNA3) || defined(RDNA2)
- mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
- load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= CC_VOLTA
- const int mmq_x = MMQ_X_Q4_0_AMPERE;
- const int mmq_y = MMQ_Y_Q4_0_AMPERE;
- const int nwarps = NWARPS_Q4_0_AMPERE;
- mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
- load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= MIN_CC_DP4A
- const int mmq_x = MMQ_X_Q4_0_PASCAL;
- const int mmq_y = MMQ_Y_Q4_0_PASCAL;
- const int nwarps = NWARPS_Q4_0_PASCAL;
- mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
- load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #else
- GGML_UNUSED(vec_dot_q4_0_q8_1_mul_mat);
- NO_DEVICE_CODE;
- #endif // __CUDA_ARCH__ >= CC_VOLTA
- }
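- // A minimal host-side launch sketch (illustration only, not part of this file): the grid tiles dst in
- // mmq_y x mmq_x blocks and each thread block runs nwarps warps of WARP_SIZE threads; the need_check
- // variant is selected when nrows_x is not a multiple of mmq_y. The function name and the use of the
- // *_AMPERE constants here are assumptions made for the sketch.
- //
- // static void mul_mat_q4_0_launch_sketch(
- //         const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
- //         const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- //     const int mmq_x  = MMQ_X_Q4_0_AMPERE;
- //     const int mmq_y  = MMQ_Y_Q4_0_AMPERE;
- //     const int nwarps = NWARPS_Q4_0_AMPERE;
- //     const dim3 block_nums((nrows_x + mmq_y - 1) / mmq_y, (ncols_y + mmq_x - 1) / mmq_x, 1);
- //     const dim3 block_dims(WARP_SIZE, nwarps, 1);
- //     if (nrows_x % mmq_y == 0) {
- //         mul_mat_q4_0<false><<<block_nums, block_dims, 0, stream>>>
- //             (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- //     } else {
- //         mul_mat_q4_0<true><<<block_nums, block_dims, 0, stream>>>
- //             (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- //     }
- // }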
- #define MMQ_X_Q4_1_RDNA2 64
- #define MMQ_Y_Q4_1_RDNA2 128
- #define NWARPS_Q4_1_RDNA2 8
- #define MMQ_X_Q4_1_RDNA1 64
- #define MMQ_Y_Q4_1_RDNA1 64
- #define NWARPS_Q4_1_RDNA1 8
- #if defined(CUDA_USE_TENSOR_CORES)
- #define MMQ_X_Q4_1_AMPERE 4
- #define MMQ_Y_Q4_1_AMPERE 32
- #define NWARPS_Q4_1_AMPERE 4
- #else
- #define MMQ_X_Q4_1_AMPERE 64
- #define MMQ_Y_Q4_1_AMPERE 128
- #define NWARPS_Q4_1_AMPERE 4
- #endif
- #define MMQ_X_Q4_1_PASCAL 64
- #define MMQ_Y_Q4_1_PASCAL 64
- #define NWARPS_Q4_1_PASCAL 8
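- // __launch_bounds__(threads, 2) below asks the compiler to cap register usage so that at least two blocks
- // can stay resident per SM; it is applied on the RDNA2/RDNA3 path and, for this kernel, also on pre-Volta
- // CUDA targets.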
- template <bool need_check> static __global__ void
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- __launch_bounds__(WARP_SIZE*NWARPS_Q4_1_RDNA2, 2)
- #endif // defined(RDNA3) || defined(RDNA2)
- #elif __CUDA_ARCH__ < CC_VOLTA
- __launch_bounds__(WARP_SIZE*NWARPS_Q4_1_PASCAL, 2)
- #endif // __CUDA_ARCH__ < CC_VOLTA
- mul_mat_q4_1(
- const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- const int mmq_x = MMQ_X_Q4_1_RDNA2;
- const int mmq_y = MMQ_Y_Q4_1_RDNA2;
- const int nwarps = NWARPS_Q4_1_RDNA2;
- #else
- const int mmq_x = MMQ_X_Q4_1_RDNA1;
- const int mmq_y = MMQ_Y_Q4_1_RDNA1;
- const int nwarps = NWARPS_Q4_1_RDNA1;
- #endif // defined(RDNA3) || defined(RDNA2)
- mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
- load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= CC_VOLTA
- const int mmq_x = MMQ_X_Q4_1_AMPERE;
- const int mmq_y = MMQ_Y_Q4_1_AMPERE;
- const int nwarps = NWARPS_Q4_1_AMPERE;
- mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
- load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= MIN_CC_DP4A
- const int mmq_x = MMQ_X_Q4_1_PASCAL;
- const int mmq_y = MMQ_Y_Q4_1_PASCAL;
- const int nwarps = NWARPS_Q4_1_PASCAL;
- mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
- load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #else
- GGML_UNUSED(vec_dot_q4_1_q8_1_mul_mat);
- NO_DEVICE_CODE;
- #endif // __CUDA_ARCH__ >= CC_VOLTA
- }
- #define MMQ_X_Q5_0_RDNA2 64
- #define MMQ_Y_Q5_0_RDNA2 128
- #define NWARPS_Q5_0_RDNA2 8
- #define MMQ_X_Q5_0_RDNA1 64
- #define MMQ_Y_Q5_0_RDNA1 64
- #define NWARPS_Q5_0_RDNA1 8
- #if defined(CUDA_USE_TENSOR_CORES)
- #define MMQ_X_Q5_0_AMPERE 4
- #define MMQ_Y_Q5_0_AMPERE 32
- #define NWARPS_Q5_0_AMPERE 4
- #else
- #define MMQ_X_Q5_0_AMPERE 128
- #define MMQ_Y_Q5_0_AMPERE 64
- #define NWARPS_Q5_0_AMPERE 4
- #endif
- #define MMQ_X_Q5_0_PASCAL 64
- #define MMQ_Y_Q5_0_PASCAL 64
- #define NWARPS_Q5_0_PASCAL 8
- template <bool need_check> static __global__ void
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- __launch_bounds__(WARP_SIZE*NWARPS_Q5_0_RDNA2, 2)
- #endif // defined(RDNA3) || defined(RDNA2)
- #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- mul_mat_q5_0(
- const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- const int mmq_x = MMQ_X_Q5_0_RDNA2;
- const int mmq_y = MMQ_Y_Q5_0_RDNA2;
- const int nwarps = NWARPS_Q5_0_RDNA2;
- #else
- const int mmq_x = MMQ_X_Q5_0_RDNA1;
- const int mmq_y = MMQ_Y_Q5_0_RDNA1;
- const int nwarps = NWARPS_Q5_0_RDNA1;
- #endif // defined(RDNA3) || defined(RDNA2)
- mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
- load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= CC_VOLTA
- const int mmq_x = MMQ_X_Q5_0_AMPERE;
- const int mmq_y = MMQ_Y_Q5_0_AMPERE;
- const int nwarps = NWARPS_Q5_0_AMPERE;
- mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
- load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= MIN_CC_DP4A
- const int mmq_x = MMQ_X_Q5_0_PASCAL;
- const int mmq_y = MMQ_Y_Q5_0_PASCAL;
- const int nwarps = NWARPS_Q5_0_PASCAL;
- mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
- load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #else
- GGML_UNUSED(vec_dot_q5_0_q8_1_mul_mat);
- NO_DEVICE_CODE;
- #endif // __CUDA_ARCH__ >= CC_VOLTA
- }
- #define MMQ_X_Q5_1_RDNA2 64
- #define MMQ_Y_Q5_1_RDNA2 128
- #define NWARPS_Q5_1_RDNA2 8
- #define MMQ_X_Q5_1_RDNA1 64
- #define MMQ_Y_Q5_1_RDNA1 64
- #define NWARPS_Q5_1_RDNA1 8
- #if defined(CUDA_USE_TENSOR_CORES)
- #define MMQ_X_Q5_1_AMPERE 4
- #define MMQ_Y_Q5_1_AMPERE 32
- #define NWARPS_Q5_1_AMPERE 4
- #else
- #define MMQ_X_Q5_1_AMPERE 128
- #define MMQ_Y_Q5_1_AMPERE 64
- #define NWARPS_Q5_1_AMPERE 4
- #endif
- #define MMQ_X_Q5_1_PASCAL 64
- #define MMQ_Y_Q5_1_PASCAL 64
- #define NWARPS_Q5_1_PASCAL 8
- template <bool need_check> static __global__ void
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- __launch_bounds__(WARP_SIZE*NWARPS_Q5_1_RDNA2, 2)
- #endif // defined(RDNA3) || defined(RDNA2)
- #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- mul_mat_q5_1(
- const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- const int mmq_x = MMQ_X_Q5_1_RDNA2;
- const int mmq_y = MMQ_Y_Q5_1_RDNA2;
- const int nwarps = NWARPS_Q5_1_RDNA2;
- #else
- const int mmq_x = MMQ_X_Q5_1_RDNA1;
- const int mmq_y = MMQ_Y_Q5_1_RDNA1;
- const int nwarps = NWARPS_Q5_1_RDNA1;
- #endif // defined(RDNA3) || defined(RDNA2)
- mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
- load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= CC_VOLTA
- const int mmq_x = MMQ_X_Q5_1_AMPERE;
- const int mmq_y = MMQ_Y_Q5_1_AMPERE;
- const int nwarps = NWARPS_Q5_1_AMPERE;
- mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
- load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= MIN_CC_DP4A
- const int mmq_x = MMQ_X_Q5_1_PASCAL;
- const int mmq_y = MMQ_Y_Q5_1_PASCAL;
- const int nwarps = NWARPS_Q5_1_PASCAL;
- mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
- load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #else
- GGML_UNUSED(vec_dot_q5_1_q8_1_mul_mat);
- NO_DEVICE_CODE;
- #endif // __CUDA_ARCH__ >= CC_VOLTA
- }
- #define MMQ_X_Q8_0_RDNA2 64
- #define MMQ_Y_Q8_0_RDNA2 128
- #define NWARPS_Q8_0_RDNA2 8
- #define MMQ_X_Q8_0_RDNA1 64
- #define MMQ_Y_Q8_0_RDNA1 64
- #define NWARPS_Q8_0_RDNA1 8
- #if defined(CUDA_USE_TENSOR_CORES)
- #define MMQ_X_Q8_0_AMPERE 4
- #define MMQ_Y_Q8_0_AMPERE 32
- #define NWARPS_Q8_0_AMPERE 4
- #else
- #define MMQ_X_Q8_0_AMPERE 128
- #define MMQ_Y_Q8_0_AMPERE 64
- #define NWARPS_Q8_0_AMPERE 4
- #endif
- #define MMQ_X_Q8_0_PASCAL 64
- #define MMQ_Y_Q8_0_PASCAL 64
- #define NWARPS_Q8_0_PASCAL 8
- template <bool need_check> static __global__ void
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- __launch_bounds__(WARP_SIZE*NWARPS_Q8_0_RDNA2, 2)
- #endif // defined(RDNA3) || defined(RDNA2)
- #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- mul_mat_q8_0(
- const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- const int mmq_x = MMQ_X_Q8_0_RDNA2;
- const int mmq_y = MMQ_Y_Q8_0_RDNA2;
- const int nwarps = NWARPS_Q8_0_RDNA2;
- #else
- const int mmq_x = MMQ_X_Q8_0_RDNA1;
- const int mmq_y = MMQ_Y_Q8_0_RDNA1;
- const int nwarps = NWARPS_Q8_0_RDNA1;
- #endif // defined(RDNA3) || defined(RDNA2)
- mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
- load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= CC_VOLTA
- const int mmq_x = MMQ_X_Q8_0_AMPERE;
- const int mmq_y = MMQ_Y_Q8_0_AMPERE;
- const int nwarps = NWARPS_Q8_0_AMPERE;
- mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
- load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= MIN_CC_DP4A
- const int mmq_x = MMQ_X_Q8_0_PASCAL;
- const int mmq_y = MMQ_Y_Q8_0_PASCAL;
- const int nwarps = NWARPS_Q8_0_PASCAL;
- mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
- load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #else
- GGML_UNUSED(vec_dot_q8_0_q8_1_mul_mat);
- NO_DEVICE_CODE;
- #endif // __CUDA_ARCH__ >= CC_VOLTA
- }
- #define MMQ_X_Q2_K_RDNA2 64
- #define MMQ_Y_Q2_K_RDNA2 128
- #define NWARPS_Q2_K_RDNA2 8
- #define MMQ_X_Q2_K_RDNA1 128
- #define MMQ_Y_Q2_K_RDNA1 32
- #define NWARPS_Q2_K_RDNA1 8
- #if defined(CUDA_USE_TENSOR_CORES)
- #define MMQ_X_Q2_K_AMPERE 4
- #define MMQ_Y_Q2_K_AMPERE 32
- #define NWARPS_Q2_K_AMPERE 4
- #else
- #define MMQ_X_Q2_K_AMPERE 64
- #define MMQ_Y_Q2_K_AMPERE 128
- #define NWARPS_Q2_K_AMPERE 4
- #endif
- #define MMQ_X_Q2_K_PASCAL 64
- #define MMQ_Y_Q2_K_PASCAL 64
- #define NWARPS_Q2_K_PASCAL 8
- template <bool need_check> static __global__ void
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- __launch_bounds__(WARP_SIZE*NWARPS_Q2_K_RDNA2, 2)
- #endif // defined(RDNA3) || defined(RDNA2)
- #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- mul_mat_q2_K(
- const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- const int mmq_x = MMQ_X_Q2_K_RDNA2;
- const int mmq_y = MMQ_Y_Q2_K_RDNA2;
- const int nwarps = NWARPS_Q2_K_RDNA2;
- #else
- const int mmq_x = MMQ_X_Q2_K_RDNA1;
- const int mmq_y = MMQ_Y_Q2_K_RDNA1;
- const int nwarps = NWARPS_Q2_K_RDNA1;
- #endif // defined(RDNA3) || defined(RDNA2)
- mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
- load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= CC_VOLTA
- const int mmq_x = MMQ_X_Q2_K_AMPERE;
- const int mmq_y = MMQ_Y_Q2_K_AMPERE;
- const int nwarps = NWARPS_Q2_K_AMPERE;
- mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
- load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= MIN_CC_DP4A
- const int mmq_x = MMQ_X_Q2_K_PASCAL;
- const int mmq_y = MMQ_Y_Q2_K_PASCAL;
- const int nwarps = NWARPS_Q2_K_PASCAL;
- mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
- load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #else
- GGML_UNUSED(vec_dot_q2_K_q8_1_mul_mat);
- NO_DEVICE_CODE;
- #endif // __CUDA_ARCH__ >= CC_VOLTA
- }
- #define MMQ_X_Q3_K_RDNA2 128
- #define MMQ_Y_Q3_K_RDNA2 64
- #define NWARPS_Q3_K_RDNA2 8
- #define MMQ_X_Q3_K_RDNA1 32
- #define MMQ_Y_Q3_K_RDNA1 128
- #define NWARPS_Q3_K_RDNA1 8
- #if defined(CUDA_USE_TENSOR_CORES)
- #define MMQ_X_Q3_K_AMPERE 4
- #define MMQ_Y_Q3_K_AMPERE 32
- #define NWARPS_Q3_K_AMPERE 4
- #else
- #define MMQ_X_Q3_K_AMPERE 128
- #define MMQ_Y_Q3_K_AMPERE 128
- #define NWARPS_Q3_K_AMPERE 4
- #endif
- #define MMQ_X_Q3_K_PASCAL 64
- #define MMQ_Y_Q3_K_PASCAL 64
- #define NWARPS_Q3_K_PASCAL 8
- template <bool need_check> static __global__ void
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- __launch_bounds__(WARP_SIZE*NWARPS_Q3_K_RDNA2, 2)
- #endif // defined(RDNA3) || defined(RDNA2)
- #elif __CUDA_ARCH__ < CC_VOLTA
- __launch_bounds__(WARP_SIZE*NWARPS_Q3_K_PASCAL, 2)
- #endif // __CUDA_ARCH__ < CC_VOLTA
- mul_mat_q3_K(
- const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- const int mmq_x = MMQ_X_Q3_K_RDNA2;
- const int mmq_y = MMQ_Y_Q3_K_RDNA2;
- const int nwarps = NWARPS_Q3_K_RDNA2;
- #else
- const int mmq_x = MMQ_X_Q3_K_RDNA1;
- const int mmq_y = MMQ_Y_Q3_K_RDNA1;
- const int nwarps = NWARPS_Q3_K_RDNA1;
- #endif // defined(RDNA3) || defined(RDNA2)
- mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
- load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= CC_VOLTA
- const int mmq_x = MMQ_X_Q3_K_AMPERE;
- const int mmq_y = MMQ_Y_Q3_K_AMPERE;
- const int nwarps = NWARPS_Q3_K_AMPERE;
- mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
- load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= MIN_CC_DP4A
- const int mmq_x = MMQ_X_Q3_K_PASCAL;
- const int mmq_y = MMQ_Y_Q3_K_PASCAL;
- const int nwarps = NWARPS_Q3_K_PASCAL;
- mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
- load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #else
- GGML_UNUSED(vec_dot_q3_K_q8_1_mul_mat);
- NO_DEVICE_CODE;
- #endif // __CUDA_ARCH__ >= CC_VOLTA
- }
- #define MMQ_X_Q4_K_RDNA2 64
- #define MMQ_Y_Q4_K_RDNA2 128
- #define NWARPS_Q4_K_RDNA2 8
- #define MMQ_X_Q4_K_RDNA1 32
- #define MMQ_Y_Q4_K_RDNA1 64
- #define NWARPS_Q4_K_RDNA1 8
- #if defined(CUDA_USE_TENSOR_CORES)
- #define MMQ_X_Q4_K_AMPERE 4
- #define MMQ_Y_Q4_K_AMPERE 32
- #define NWARPS_Q4_K_AMPERE 4
- #else
- #define MMQ_X_Q4_K_AMPERE 64
- #define MMQ_Y_Q4_K_AMPERE 128
- #define NWARPS_Q4_K_AMPERE 4
- #endif
- #define MMQ_X_Q4_K_PASCAL 64
- #define MMQ_Y_Q4_K_PASCAL 64
- #define NWARPS_Q4_K_PASCAL 8
- template <bool need_check> static __global__ void
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- __launch_bounds__(WARP_SIZE*NWARPS_Q4_K_RDNA2, 2)
- #endif // defined(RDNA3) || defined(RDNA2)
- #elif __CUDA_ARCH__ < CC_VOLTA
- __launch_bounds__(WARP_SIZE*NWARPS_Q4_K_PASCAL, 2)
- #endif // __CUDA_ARCH__ < CC_VOLTA
- mul_mat_q4_K(
- const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- const int mmq_x = MMQ_X_Q4_K_RDNA2;
- const int mmq_y = MMQ_Y_Q4_K_RDNA2;
- const int nwarps = NWARPS_Q4_K_RDNA2;
- #else
- const int mmq_x = MMQ_X_Q4_K_RDNA1;
- const int mmq_y = MMQ_Y_Q4_K_RDNA1;
- const int nwarps = NWARPS_Q4_K_RDNA1;
- #endif // defined(RDNA3) || defined(RDNA2)
- mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
- load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= CC_VOLTA
- const int mmq_x = MMQ_X_Q4_K_AMPERE;
- const int mmq_y = MMQ_Y_Q4_K_AMPERE;
- const int nwarps = NWARPS_Q4_K_AMPERE;
- mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
- load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= MIN_CC_DP4A
- const int mmq_x = MMQ_X_Q4_K_PASCAL;
- const int mmq_y = MMQ_Y_Q4_K_PASCAL;
- const int nwarps = NWARPS_Q4_K_PASCAL;
- mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
- load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #else
- GGML_UNUSED(vec_dot_q4_K_q8_1_mul_mat);
- NO_DEVICE_CODE;
- #endif // __CUDA_ARCH__ >= CC_VOLTA
- }
- #define MMQ_X_Q5_K_RDNA2 64
- #define MMQ_Y_Q5_K_RDNA2 128
- #define NWARPS_Q5_K_RDNA2 8
- #define MMQ_X_Q5_K_RDNA1 32
- #define MMQ_Y_Q5_K_RDNA1 64
- #define NWARPS_Q5_K_RDNA1 8
- #if defined(CUDA_USE_TENSOR_CORES)
- #define MMQ_X_Q5_K_AMPERE 4
- #define MMQ_Y_Q5_K_AMPERE 32
- #define NWARPS_Q5_K_AMPERE 4
- #else
- #define MMQ_X_Q5_K_AMPERE 64
- #define MMQ_Y_Q5_K_AMPERE 128
- #define NWARPS_Q5_K_AMPERE 4
- #endif
- #define MMQ_X_Q5_K_PASCAL 64
- #define MMQ_Y_Q5_K_PASCAL 64
- #define NWARPS_Q5_K_PASCAL 8
- template <bool need_check> static __global__ void
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- __launch_bounds__(WARP_SIZE*NWARPS_Q5_K_RDNA2, 2)
- #endif // defined(RDNA3) || defined(RDNA2)
- #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- mul_mat_q5_K(
- const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- const int mmq_x = MMQ_X_Q5_K_RDNA2;
- const int mmq_y = MMQ_Y_Q5_K_RDNA2;
- const int nwarps = NWARPS_Q5_K_RDNA2;
- #else
- const int mmq_x = MMQ_X_Q5_K_RDNA1;
- const int mmq_y = MMQ_Y_Q5_K_RDNA1;
- const int nwarps = NWARPS_Q5_K_RDNA1;
- #endif // defined(RDNA3) || defined(RDNA2)
- mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
- load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= CC_VOLTA
- const int mmq_x = MMQ_X_Q5_K_AMPERE;
- const int mmq_y = MMQ_Y_Q5_K_AMPERE;
- const int nwarps = NWARPS_Q5_K_AMPERE;
- mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
- load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= MIN_CC_DP4A
- const int mmq_x = MMQ_X_Q5_K_PASCAL;
- const int mmq_y = MMQ_Y_Q5_K_PASCAL;
- const int nwarps = NWARPS_Q5_K_PASCAL;
- mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
- load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #else
- GGML_UNUSED(vec_dot_q5_K_q8_1_mul_mat);
- NO_DEVICE_CODE;
- #endif // __CUDA_ARCH__ >= CC_VOLTA
- }
- #define MMQ_X_Q6_K_RDNA2 64
- #define MMQ_Y_Q6_K_RDNA2 128
- #define NWARPS_Q6_K_RDNA2 8
- #define MMQ_X_Q6_K_RDNA1 32
- #define MMQ_Y_Q6_K_RDNA1 64
- #define NWARPS_Q6_K_RDNA1 8
- #if defined(CUDA_USE_TENSOR_CORES)
- #define MMQ_X_Q6_K_AMPERE 4
- #define MMQ_Y_Q6_K_AMPERE 32
- #define NWARPS_Q6_K_AMPERE 4
- #else
- #define MMQ_X_Q6_K_AMPERE 64
- #define MMQ_Y_Q6_K_AMPERE 64
- #define NWARPS_Q6_K_AMPERE 4
- #endif
- #define MMQ_X_Q6_K_PASCAL 64
- #define MMQ_Y_Q6_K_PASCAL 64
- #define NWARPS_Q6_K_PASCAL 8
- template <bool need_check> static __global__ void
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- __launch_bounds__(WARP_SIZE*NWARPS_Q6_K_RDNA2, 2)
- #endif // defined(RDNA3) || defined(RDNA2)
- #elif __CUDA_ARCH__ < CC_VOLTA
- __launch_bounds__(WARP_SIZE*NWARPS_Q6_K_PASCAL, 2)
- #endif // __CUDA_ARCH__ < CC_VOLTA
- mul_mat_q6_K(
- const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- #if defined(RDNA3) || defined(RDNA2)
- const int mmq_x = MMQ_X_Q6_K_RDNA2;
- const int mmq_y = MMQ_Y_Q6_K_RDNA2;
- const int nwarps = NWARPS_Q6_K_RDNA2;
- #else
- const int mmq_x = MMQ_X_Q6_K_RDNA1;
- const int mmq_y = MMQ_Y_Q6_K_RDNA1;
- const int nwarps = NWARPS_Q6_K_RDNA1;
- #endif // defined(RDNA3) || defined(RDNA2)
- mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
- load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= CC_VOLTA
- const int mmq_x = MMQ_X_Q6_K_AMPERE;
- const int mmq_y = MMQ_Y_Q6_K_AMPERE;
- const int nwarps = NWARPS_Q6_K_AMPERE;
- mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
- load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #elif __CUDA_ARCH__ >= MIN_CC_DP4A
- const int mmq_x = MMQ_X_Q6_K_PASCAL;
- const int mmq_y = MMQ_Y_Q6_K_PASCAL;
- const int nwarps = NWARPS_Q6_K_PASCAL;
- mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
- load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- #else
- GGML_UNUSED(vec_dot_q6_K_q8_1_mul_mat);
- NO_DEVICE_CODE;
- #endif // __CUDA_ARCH__ >= CC_VOLTA
- }
- static void ggml_mul_mat_q4_0_q8_1_cuda(
- const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id = ggml_cuda_get_device();
- const int compute_capability = ggml_cuda_info().devices[id].cc;
- int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_RDNA2) {
- mmq_x = MMQ_X_Q4_0_RDNA2;
- mmq_y = MMQ_Y_Q4_0_RDNA2;
- nwarps = NWARPS_Q4_0_RDNA2;
- } else if (compute_capability >= CC_OFFSET_AMD) {
- mmq_x = MMQ_X_Q4_0_RDNA1;
- mmq_y = MMQ_Y_Q4_0_RDNA1;
- nwarps = NWARPS_Q4_0_RDNA1;
- } else if (compute_capability >= CC_VOLTA) {
- mmq_x = MMQ_X_Q4_0_AMPERE;
- mmq_y = MMQ_Y_Q4_0_AMPERE;
- nwarps = NWARPS_Q4_0_AMPERE;
- } else if (compute_capability >= MIN_CC_DP4A) {
- mmq_x = MMQ_X_Q4_0_PASCAL;
- mmq_y = MMQ_Y_Q4_0_PASCAL;
- nwarps = NWARPS_Q4_0_PASCAL;
- } else {
- GGML_ASSERT(false);
- }
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q4_0<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q4_0<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
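- // Illustrative note, not part of the original launcher: the grid is sized with a
- // ceiling division so that a trailing partial tile of rows still gets a block, and
- // need_check selects the bounds-checked kernel variant whenever nrows_x is not an
- // exact multiple of mmq_y. With hypothetical values nrows_x = 4097, mmq_y = 64:
- //   block_num_x = (4097 + 64 - 1) / 64 = 65;   // 64 full tiles plus 1 partial tile
- //   4097 % 64 == 1, so need_check = true and the checked kernel is launched.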
- static void ggml_mul_mat_q4_1_q8_1_cuda(
- const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id = ggml_cuda_get_device();
- const int compute_capability = ggml_cuda_info().devices[id].cc;
- int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_RDNA2) {
- mmq_x = MMQ_X_Q4_1_RDNA2;
- mmq_y = MMQ_Y_Q4_1_RDNA2;
- nwarps = NWARPS_Q4_1_RDNA2;
- } else if (compute_capability >= CC_OFFSET_AMD) {
- mmq_x = MMQ_X_Q4_1_RDNA1;
- mmq_y = MMQ_Y_Q4_1_RDNA1;
- nwarps = NWARPS_Q4_1_RDNA1;
- } else if (compute_capability >= CC_VOLTA) {
- mmq_x = MMQ_X_Q4_1_AMPERE;
- mmq_y = MMQ_Y_Q4_1_AMPERE;
- nwarps = NWARPS_Q4_1_AMPERE;
- } else if (compute_capability >= MIN_CC_DP4A) {
- mmq_x = MMQ_X_Q4_1_PASCAL;
- mmq_y = MMQ_Y_Q4_1_PASCAL;
- nwarps = NWARPS_Q4_1_PASCAL;
- } else {
- GGML_ASSERT(false);
- }
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q4_1<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q4_1<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- static void ggml_mul_mat_q5_0_q8_1_cuda(
- const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id = ggml_cuda_get_device();
- const int compute_capability = ggml_cuda_info().devices[id].cc;
- int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_RDNA2) {
- mmq_x = MMQ_X_Q5_0_RDNA2;
- mmq_y = MMQ_Y_Q5_0_RDNA2;
- nwarps = NWARPS_Q5_0_RDNA2;
- } else if (compute_capability >= CC_OFFSET_AMD) {
- mmq_x = MMQ_X_Q5_0_RDNA1;
- mmq_y = MMQ_Y_Q5_0_RDNA1;
- nwarps = NWARPS_Q5_0_RDNA1;
- } else if (compute_capability >= CC_VOLTA) {
- mmq_x = MMQ_X_Q5_0_AMPERE;
- mmq_y = MMQ_Y_Q5_0_AMPERE;
- nwarps = NWARPS_Q5_0_AMPERE;
- } else if (compute_capability >= MIN_CC_DP4A) {
- mmq_x = MMQ_X_Q5_0_PASCAL;
- mmq_y = MMQ_Y_Q5_0_PASCAL;
- nwarps = NWARPS_Q5_0_PASCAL;
- } else {
- GGML_ASSERT(false);
- }
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q5_0<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q5_0<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- static void ggml_mul_mat_q5_1_q8_1_cuda(
- const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id = ggml_cuda_get_device();
- const int compute_capability = ggml_cuda_info().devices[id].cc;
- int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_RDNA2) {
- mmq_x = MMQ_X_Q5_1_RDNA2;
- mmq_y = MMQ_Y_Q5_1_RDNA2;
- nwarps = NWARPS_Q5_1_RDNA2;
- } else if (compute_capability >= CC_OFFSET_AMD) {
- mmq_x = MMQ_X_Q5_1_RDNA1;
- mmq_y = MMQ_Y_Q5_1_RDNA1;
- nwarps = NWARPS_Q5_1_RDNA1;
- } else if (compute_capability >= CC_VOLTA) {
- mmq_x = MMQ_X_Q5_1_AMPERE;
- mmq_y = MMQ_Y_Q5_1_AMPERE;
- nwarps = NWARPS_Q5_1_AMPERE;
- } else if (compute_capability >= MIN_CC_DP4A) {
- mmq_x = MMQ_X_Q5_1_PASCAL;
- mmq_y = MMQ_Y_Q5_1_PASCAL;
- nwarps = NWARPS_Q5_1_PASCAL;
- } else {
- GGML_ASSERT(false);
- }
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q5_1<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q5_1<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- static void ggml_mul_mat_q8_0_q8_1_cuda(
- const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id = ggml_cuda_get_device();
- const int compute_capability = ggml_cuda_info().devices[id].cc;
- int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_RDNA2) {
- mmq_x = MMQ_X_Q8_0_RDNA2;
- mmq_y = MMQ_Y_Q8_0_RDNA2;
- nwarps = NWARPS_Q8_0_RDNA2;
- } else if (compute_capability >= CC_OFFSET_AMD) {
- mmq_x = MMQ_X_Q8_0_RDNA1;
- mmq_y = MMQ_Y_Q8_0_RDNA1;
- nwarps = NWARPS_Q8_0_RDNA1;
- } else if (compute_capability >= CC_VOLTA) {
- mmq_x = MMQ_X_Q8_0_AMPERE;
- mmq_y = MMQ_Y_Q8_0_AMPERE;
- nwarps = NWARPS_Q8_0_AMPERE;
- } else if (compute_capability >= MIN_CC_DP4A) {
- mmq_x = MMQ_X_Q8_0_PASCAL;
- mmq_y = MMQ_Y_Q8_0_PASCAL;
- nwarps = NWARPS_Q8_0_PASCAL;
- } else {
- GGML_ASSERT(false);
- }
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q8_0<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q8_0<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- static void ggml_mul_mat_q2_K_q8_1_cuda(
- const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id = ggml_cuda_get_device();
- const int compute_capability = ggml_cuda_info().devices[id].cc;
- int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_RDNA2) {
- mmq_x = MMQ_X_Q2_K_RDNA2;
- mmq_y = MMQ_Y_Q2_K_RDNA2;
- nwarps = NWARPS_Q2_K_RDNA2;
- } else if (compute_capability >= CC_OFFSET_AMD) {
- mmq_x = MMQ_X_Q2_K_RDNA1;
- mmq_y = MMQ_Y_Q2_K_RDNA1;
- nwarps = NWARPS_Q2_K_RDNA1;
- } else if (compute_capability >= CC_VOLTA) {
- mmq_x = MMQ_X_Q2_K_AMPERE;
- mmq_y = MMQ_Y_Q2_K_AMPERE;
- nwarps = NWARPS_Q2_K_AMPERE;
- } else if (compute_capability >= MIN_CC_DP4A) {
- mmq_x = MMQ_X_Q2_K_PASCAL;
- mmq_y = MMQ_Y_Q2_K_PASCAL;
- nwarps = NWARPS_Q2_K_PASCAL;
- } else {
- GGML_ASSERT(false);
- }
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q2_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q2_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- static void ggml_mul_mat_q3_K_q8_1_cuda(
- const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- #if QK_K == 256
- int id = ggml_cuda_get_device();
- const int compute_capability = ggml_cuda_info().devices[id].cc;
- int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_RDNA2) {
- mmq_x = MMQ_X_Q3_K_RDNA2;
- mmq_y = MMQ_Y_Q3_K_RDNA2;
- nwarps = NWARPS_Q3_K_RDNA2;
- } else if (compute_capability >= CC_OFFSET_AMD) {
- mmq_x = MMQ_X_Q3_K_RDNA1;
- mmq_y = MMQ_Y_Q3_K_RDNA1;
- nwarps = NWARPS_Q3_K_RDNA1;
- } else if (compute_capability >= CC_VOLTA) {
- mmq_x = MMQ_X_Q3_K_AMPERE;
- mmq_y = MMQ_Y_Q3_K_AMPERE;
- nwarps = NWARPS_Q3_K_AMPERE;
- } else if (compute_capability >= MIN_CC_DP4A) {
- mmq_x = MMQ_X_Q3_K_PASCAL;
- mmq_y = MMQ_Y_Q3_K_PASCAL;
- nwarps = NWARPS_Q3_K_PASCAL;
- } else {
- GGML_ASSERT(false);
- }
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q3_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q3_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- #endif
- }
- static void ggml_mul_mat_q4_K_q8_1_cuda(
- const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id = ggml_cuda_get_device();
- const int compute_capability = ggml_cuda_info().devices[id].cc;
- int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_RDNA2) {
- mmq_x = MMQ_X_Q4_K_RDNA2;
- mmq_y = MMQ_Y_Q4_K_RDNA2;
- nwarps = NWARPS_Q4_K_RDNA2;
- } else if (compute_capability >= CC_OFFSET_AMD) {
- mmq_x = MMQ_X_Q4_K_RDNA1;
- mmq_y = MMQ_Y_Q4_K_RDNA1;
- nwarps = NWARPS_Q4_K_RDNA1;
- } else if (compute_capability >= CC_VOLTA) {
- mmq_x = MMQ_X_Q4_K_AMPERE;
- mmq_y = MMQ_Y_Q4_K_AMPERE;
- nwarps = NWARPS_Q4_K_AMPERE;
- } else if (compute_capability >= MIN_CC_DP4A) {
- mmq_x = MMQ_X_Q4_K_PASCAL;
- mmq_y = MMQ_Y_Q4_K_PASCAL;
- nwarps = NWARPS_Q4_K_PASCAL;
- } else {
- GGML_ASSERT(false);
- }
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q4_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q4_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- static void ggml_mul_mat_q5_K_q8_1_cuda(
- const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id = ggml_cuda_get_device();
- const int compute_capability = ggml_cuda_info().devices[id].cc;
- int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_RDNA2) {
- mmq_x = MMQ_X_Q5_K_RDNA2;
- mmq_y = MMQ_Y_Q5_K_RDNA2;
- nwarps = NWARPS_Q5_K_RDNA2;
- } else if (compute_capability >= CC_OFFSET_AMD) {
- mmq_x = MMQ_X_Q5_K_RDNA1;
- mmq_y = MMQ_Y_Q5_K_RDNA1;
- nwarps = NWARPS_Q5_K_RDNA1;
- } else if (compute_capability >= CC_VOLTA) {
- mmq_x = MMQ_X_Q5_K_AMPERE;
- mmq_y = MMQ_Y_Q5_K_AMPERE;
- nwarps = NWARPS_Q5_K_AMPERE;
- } else if (compute_capability >= MIN_CC_DP4A) {
- mmq_x = MMQ_X_Q5_K_PASCAL;
- mmq_y = MMQ_Y_Q5_K_PASCAL;
- nwarps = NWARPS_Q5_K_PASCAL;
- } else {
- GGML_ASSERT(false);
- }
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q5_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q5_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- static void ggml_mul_mat_q6_K_q8_1_cuda(
- const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int id = ggml_cuda_get_device();
- const int compute_capability = ggml_cuda_info().devices[id].cc;
- int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_RDNA2) {
- mmq_x = MMQ_X_Q6_K_RDNA2;
- mmq_y = MMQ_Y_Q6_K_RDNA2;
- nwarps = NWARPS_Q6_K_RDNA2;
- } else if (compute_capability >= CC_OFFSET_AMD) {
- mmq_x = MMQ_X_Q6_K_RDNA1;
- mmq_y = MMQ_Y_Q6_K_RDNA1;
- nwarps = NWARPS_Q6_K_RDNA1;
- } else if (compute_capability >= CC_VOLTA) {
- mmq_x = MMQ_X_Q6_K_AMPERE;
- mmq_y = MMQ_Y_Q6_K_AMPERE;
- nwarps = NWARPS_Q6_K_AMPERE;
- } else if (compute_capability >= MIN_CC_DP4A) {
- mmq_x = MMQ_X_Q6_K_PASCAL;
- mmq_y = MMQ_Y_Q6_K_PASCAL;
- nwarps = NWARPS_Q6_K_PASCAL;
- } else {
- GGML_ASSERT(false);
- }
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q6_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q6_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- void ggml_cuda_op_mul_mat_q(
- ggml_backend_cuda_context & ctx,
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
- const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
- const int64_t src1_padded_row_size, cudaStream_t stream) {
- const int64_t ne00 = src0->ne[0];
- const int64_t ne10 = src1->ne[0];
- GGML_ASSERT(ne10 % QK8_1 == 0);
- const int64_t ne0 = dst->ne[0];
- const int64_t row_diff = row_high - row_low;
- int id = ggml_cuda_get_device();
- // the main device has a larger memory buffer to hold the results from all GPUs
- // nrows_dst == nrows of the matrix that the kernel writes into
- const int64_t nrows_dst = id == ctx.device ? ne0 : row_diff;
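- // Illustrative example with hypothetical numbers (not from the original source):
- // if the rows handled here were split evenly across two GPUs and ne0 were 8192,
- // the main device would pass nrows_dst = ne0 = 8192 (full-size result buffer),
- // while a secondary device would pass nrows_dst = row_diff = 4096 (its own slice).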
- switch (src0->type) {
- case GGML_TYPE_Q4_0:
- ggml_mul_mat_q4_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
- break;
- case GGML_TYPE_Q4_1:
- ggml_mul_mat_q4_1_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
- break;
- case GGML_TYPE_Q5_0:
- ggml_mul_mat_q5_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
- break;
- case GGML_TYPE_Q5_1:
- ggml_mul_mat_q5_1_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
- break;
- case GGML_TYPE_Q8_0:
- ggml_mul_mat_q8_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
- break;
- case GGML_TYPE_Q2_K:
- ggml_mul_mat_q2_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
- break;
- case GGML_TYPE_Q3_K:
- ggml_mul_mat_q3_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
- break;
- case GGML_TYPE_Q4_K:
- ggml_mul_mat_q4_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
- break;
- case GGML_TYPE_Q5_K:
- ggml_mul_mat_q5_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
- break;
- case GGML_TYPE_Q6_K:
- ggml_mul_mat_q6_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
- break;
- default:
- GGML_ASSERT(false);
- break;
- }
- GGML_UNUSED(src1);
- GGML_UNUSED(dst);
- GGML_UNUSED(src1_ddf_i);
- }
- bool ggml_cuda_supports_mmq(enum ggml_type type) {
- switch (type) {
- case GGML_TYPE_Q4_0:
- case GGML_TYPE_Q4_1:
- case GGML_TYPE_Q5_0:
- case GGML_TYPE_Q5_1:
- case GGML_TYPE_Q8_0:
- case GGML_TYPE_Q2_K:
- case GGML_TYPE_Q3_K:
- case GGML_TYPE_Q4_K:
- case GGML_TYPE_Q5_K:
- case GGML_TYPE_Q6_K:
- return true;
- default:
- return false;
- }
- }
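- // A minimal usage sketch (hypothetical caller, not part of this file): a dispatcher
- // could consult ggml_cuda_supports_mmq before routing a quantized matrix
- // multiplication through ggml_cuda_op_mul_mat_q, and fall back to another path
- // for unsupported types, e.g.:
- //
- //   static bool use_mmq_for(const ggml_tensor * src0) {    // hypothetical helper
- //       return ggml_cuda_supports_mmq(src0->type);
- //   }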
|