- /**
- * llama.cpp - commit 8962422b1c6f9b8b15f5aeaea42600bcc2d44177 - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
- // SPDX-FileCopyrightText: Copyright 2024 Arm Ltd.
- #define GGML_COMMON_IMPL_C
- #include "ggml-common.h"
- #include "ggml-quants.h"
- #include "ggml-impl.h"
- #include <math.h>
- #include <string.h>
- #include <assert.h>
- #include <float.h>
- #include <stdlib.h> // for qsort
- #include <stdio.h> // for GGML_ASSERT
- #include "ggml-aarch64.h"
- #if defined(__GNUC__)
- #pragma GCC diagnostic ignored "-Woverlength-strings"
- #elif defined(_MSC_VER)
- #pragma warning(disable: 4244 4267) // possible loss of data
- #endif
- #define UNUSED GGML_UNUSED
- // Functions to create the interleaved data layout formats
- // interleave 4 block_q4_0s in blocks of blck_size_interleave
- // returns an interleaved block_q4_0x4
- // the interleaved block_q4_0x4 stores the deltas of all 4 block_q4_0 blocks
- // first, followed by their quants interleaved in groups of blck_size_interleave bytes
- //
- // - in : a pointer to an array of 4 block_q4_0 blocks
- // - blck_size_interleave : the block_q4_0 quant bytes are interleaved in groups of
- // blck_size_interleave bytes
- // - xor_mask : the mask used to convert the nibbles in the block_q4_0 quant bytes
- // from bias-offset form to pure sign form (this saves subtract
- // operations during unpacking)
- //
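- // Worked example (illustrative, assuming blck_size_interleave = 4): out.qs is
- // filled in groups of 4 bytes taken round-robin from the 4 source blocks,
- //   out.qs[0..3]   = in[0].qs[0..3]  ^ xor_mask
- //   out.qs[4..7]   = in[1].qs[0..3]  ^ xor_mask
- //   ...
- //   out.qs[16..19] = in[0].qs[4..7]  ^ xor_mask
- // and so on until all 4 * QK4_0 / 2 quant bytes are placed.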
- static block_q4_0x4 make_block_q4_0x4(block_q4_0 * in, unsigned int blck_size_interleave, unsigned int xor_mask) {
- block_q4_0x4 out;
- for (int i = 0; i < 4; i++) {
- out.d[i] = in[i].d;
- }
- for (int i = 0; i < QK4_0 * 2; i++) {
- int src_offset = (i / (4 * blck_size_interleave)) * blck_size_interleave;
- int src_id = (i % (4 * blck_size_interleave)) / blck_size_interleave;
- src_offset += (i % blck_size_interleave);
- out.qs[i] = in[src_id].qs[src_offset] ^ xor_mask;
- }
- return out;
- }
- // interleave 8 block_q4_0s in blocks of blck_size_interleave
- // returns an interleaved block_q4_0x8
- // the interleaved block_q4_0x8 stores the deltas of all 8 block_q4_0 blocks
- // first, followed by their quants interleaved in groups of blck_size_interleave bytes
- static block_q4_0x8 make_block_q4_0x8(block_q4_0 * in, unsigned int blck_size_interleave, unsigned int xor_mask) {
- block_q4_0x8 out;
- for (int i = 0; i < 8; i++) {
- out.d[i] = in[i].d;
- }
- for (int i = 0; i < QK4_0 * 4; i++) {
- int src_offset = (i / (8 * blck_size_interleave)) * blck_size_interleave;
- int src_id = (i % (8 * blck_size_interleave)) / blck_size_interleave;
- src_offset += (i % blck_size_interleave);
- out.qs[i] = in[src_id].qs[src_offset] ^ xor_mask;
- }
- return out;
- }
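- // Note on the xor_mask value 0x88 used by the callers below: q4_0 stores each
- // 4-bit quant as an unsigned nibble n that encodes the value n - 8. XOR-ing a
- // nibble with 0x8 flips its top bit, turning that bias-offset encoding into
- // 4-bit two's complement (e.g. stored 0x0 = -8 becomes 0x8, stored 0xF = +7
- // becomes 0x7), so unpacking no longer needs a subtraction; 0x88 applies the
- // flip to both nibbles of a byte at once.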
- void quantize_q8_0_4x4(const float * restrict x, void * restrict vy, int64_t k) {
- assert(QK8_0 == 32);
- assert(k % QK8_0 == 0);
- const int nb = k / QK8_0;
- block_q8_0x4 * restrict y = (block_q8_0x4 *) vy;
- #if defined(__ARM_NEON)
- float32x4_t srcv[4][8];
- float id[4];
- for (int i = 0; i < nb; i++) {
- float32x4_t asrcv[8];
- float32x4_t amaxv[8];
- for (int row_iter = 0; row_iter < 4; row_iter++) {
- for (int j = 0; j < 8; j++) srcv[row_iter][j] = vld1q_f32(x + row_iter * k + i * 32 + 4 * j);
- for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[row_iter][j]);
- for (int j = 0; j < 4; j++) amaxv[2 * j] = vmaxq_f32(asrcv[2 * j], asrcv[2 * j + 1]);
- for (int j = 0; j < 2; j++) amaxv[4 * j] = vmaxq_f32(amaxv[4 * j], amaxv[4 * j + 2]);
- for (int j = 0; j < 1; j++) amaxv[8 * j] = vmaxq_f32(amaxv[8 * j], amaxv[8 * j + 4]);
- const float amax = vmaxvq_f32(amaxv[0]);
- const float d = amax / ((1 << 7) - 1);
- id[row_iter] = d ? 1.0f / d : 0.0f;
- y[i].d[row_iter] = GGML_FP32_TO_FP16(d);
- }
- for (int j = 0; j < 8; j++) {
- float32x4_t v = vmulq_n_f32(srcv[0][j], id[0]);
- int32x4_t vi = vcvtnq_s32_f32(v);
- y[i].qs[16 * j + 0] = vgetq_lane_s32(vi, 0);
- y[i].qs[16 * j + 1] = vgetq_lane_s32(vi, 1);
- y[i].qs[16 * j + 2] = vgetq_lane_s32(vi, 2);
- y[i].qs[16 * j + 3] = vgetq_lane_s32(vi, 3);
- v = vmulq_n_f32(srcv[1][j], id[1]);
- vi = vcvtnq_s32_f32(v);
- y[i].qs[16 * j + 4] = vgetq_lane_s32(vi, 0);
- y[i].qs[16 * j + 5] = vgetq_lane_s32(vi, 1);
- y[i].qs[16 * j + 6] = vgetq_lane_s32(vi, 2);
- y[i].qs[16 * j + 7] = vgetq_lane_s32(vi, 3);
- v = vmulq_n_f32(srcv[2][j], id[2]);
- vi = vcvtnq_s32_f32(v);
- y[i].qs[16 * j + 8] = vgetq_lane_s32(vi, 0);
- y[i].qs[16 * j + 9] = vgetq_lane_s32(vi, 1);
- y[i].qs[16 * j + 10] = vgetq_lane_s32(vi, 2);
- y[i].qs[16 * j + 11] = vgetq_lane_s32(vi, 3);
- v = vmulq_n_f32(srcv[3][j], id[3]);
- vi = vcvtnq_s32_f32(v);
- y[i].qs[16 * j + 12] = vgetq_lane_s32(vi, 0);
- y[i].qs[16 * j + 13] = vgetq_lane_s32(vi, 1);
- y[i].qs[16 * j + 14] = vgetq_lane_s32(vi, 2);
- y[i].qs[16 * j + 15] = vgetq_lane_s32(vi, 3);
- }
- }
- #else
- // scalar
- const int blck_size_interleave = 4;
- float srcv[4][QK8_0];
- float id[4];
- for (int i = 0; i < nb; i++) {
- for (int row_iter = 0; row_iter < 4; row_iter++) {
- float amax = 0.0f; // absolute max
- for (int j = 0; j < QK8_0; j++) {
- srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j];
- amax = MAX(amax, fabsf(srcv[row_iter][j]));
- }
- const float d = amax / ((1 << 7) - 1);
- id[row_iter] = d ? 1.0f / d : 0.0f;
- y[i].d[row_iter] = GGML_FP32_TO_FP16(d);
- }
- for (int j = 0; j < QK8_0 * 4; j++) {
- int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave;
- int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave;
- src_offset += (j % blck_size_interleave);
- float x0 = srcv[src_id][src_offset] * id[src_id];
- y[i].qs[j] = roundf(x0);
- }
- }
- #endif
- }
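- // Note: each row block above is quantized exactly like a plain q8_0 block
- // (d = amax / 127, q = round(x / d)); only the byte layout of the 4 rows is
- // interleaved. Illustrative numbers: amax = 2.54 gives d = 0.02, so the
- // value 1.0 is stored as round(1.0 / 0.02) = 50.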
- void quantize_q8_0_4x8(const float * restrict x, void * restrict vy, int64_t k) {
- assert(QK8_0 == 32);
- assert(k % QK8_0 == 0);
- const int nb = k / QK8_0;
- block_q8_0x4 * restrict y = (block_q8_0x4 *) vy;
- #if defined(__ARM_NEON)
- float32x4_t srcv[4][8];
- float id[4];
- for (int i = 0; i < nb; i++) {
- float32x4_t asrcv[8];
- float32x4_t amaxv[8];
- for (int row_iter = 0; row_iter < 4; row_iter++) {
- for (int j = 0; j < 8; j++) srcv[row_iter][j] = vld1q_f32(x + row_iter * k + i * 32 + 4 * j);
- for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[row_iter][j]);
- for (int j = 0; j < 4; j++) amaxv[2 * j] = vmaxq_f32(asrcv[2 * j], asrcv[2 * j + 1]);
- for (int j = 0; j < 2; j++) amaxv[4 * j] = vmaxq_f32(amaxv[4 * j], amaxv[4 * j + 2]);
- for (int j = 0; j < 1; j++) amaxv[8 * j] = vmaxq_f32(amaxv[8 * j], amaxv[8 * j + 4]);
- const float amax = vmaxvq_f32(amaxv[0]);
- const float d = amax / ((1 << 7) - 1);
- id[row_iter] = d ? 1.0f / d : 0.0f;
- y[i].d[row_iter] = GGML_FP32_TO_FP16(d);
- }
- for (int j = 0; j < 4; j++) {
- float32x4_t v = vmulq_n_f32(srcv[0][2 * j], id[0]);
- int32x4_t vi = vcvtnq_s32_f32(v);
- y[i].qs[32 * j + 0] = vgetq_lane_s32(vi, 0);
- y[i].qs[32 * j + 1] = vgetq_lane_s32(vi, 1);
- y[i].qs[32 * j + 2] = vgetq_lane_s32(vi, 2);
- y[i].qs[32 * j + 3] = vgetq_lane_s32(vi, 3);
- v = vmulq_n_f32(srcv[0][2 * j + 1], id[0]);
- vi = vcvtnq_s32_f32(v);
- y[i].qs[32 * j + 4] = vgetq_lane_s32(vi, 0);
- y[i].qs[32 * j + 5] = vgetq_lane_s32(vi, 1);
- y[i].qs[32 * j + 6] = vgetq_lane_s32(vi, 2);
- y[i].qs[32 * j + 7] = vgetq_lane_s32(vi, 3);
- v = vmulq_n_f32(srcv[1][2 * j], id[1]);
- vi = vcvtnq_s32_f32(v);
- y[i].qs[32 * j + 8] = vgetq_lane_s32(vi, 0);
- y[i].qs[32 * j + 9] = vgetq_lane_s32(vi, 1);
- y[i].qs[32 * j + 10] = vgetq_lane_s32(vi, 2);
- y[i].qs[32 * j + 11] = vgetq_lane_s32(vi, 3);
- v = vmulq_n_f32(srcv[1][2 * j + 1], id[1]);
- vi = vcvtnq_s32_f32(v);
- y[i].qs[32 * j + 12] = vgetq_lane_s32(vi, 0);
- y[i].qs[32 * j + 13] = vgetq_lane_s32(vi, 1);
- y[i].qs[32 * j + 14] = vgetq_lane_s32(vi, 2);
- y[i].qs[32 * j + 15] = vgetq_lane_s32(vi, 3);
- v = vmulq_n_f32(srcv[2][2 * j], id[2]);
- vi = vcvtnq_s32_f32(v);
- y[i].qs[32 * j + 16] = vgetq_lane_s32(vi, 0);
- y[i].qs[32 * j + 17] = vgetq_lane_s32(vi, 1);
- y[i].qs[32 * j + 18] = vgetq_lane_s32(vi, 2);
- y[i].qs[32 * j + 19] = vgetq_lane_s32(vi, 3);
- v = vmulq_n_f32(srcv[2][2 * j + 1], id[2]);
- vi = vcvtnq_s32_f32(v);
- y[i].qs[32 * j + 20] = vgetq_lane_s32(vi, 0);
- y[i].qs[32 * j + 21] = vgetq_lane_s32(vi, 1);
- y[i].qs[32 * j + 22] = vgetq_lane_s32(vi, 2);
- y[i].qs[32 * j + 23] = vgetq_lane_s32(vi, 3);
- v = vmulq_n_f32(srcv[3][2 * j], id[3]);
- vi = vcvtnq_s32_f32(v);
- y[i].qs[32 * j + 24] = vgetq_lane_s32(vi, 0);
- y[i].qs[32 * j + 25] = vgetq_lane_s32(vi, 1);
- y[i].qs[32 * j + 26] = vgetq_lane_s32(vi, 2);
- y[i].qs[32 * j + 27] = vgetq_lane_s32(vi, 3);
- v = vmulq_n_f32(srcv[3][2 * j + 1], id[3]);
- vi = vcvtnq_s32_f32(v);
- y[i].qs[32 * j + 28] = vgetq_lane_s32(vi, 0);
- y[i].qs[32 * j + 29] = vgetq_lane_s32(vi, 1);
- y[i].qs[32 * j + 30] = vgetq_lane_s32(vi, 2);
- y[i].qs[32 * j + 31] = vgetq_lane_s32(vi, 3);
- }
- }
- #else
- // scalar
- const int blck_size_interleave = 8;
- float srcv[4][QK8_0];
- float id[4];
- for (int i = 0; i < nb; i++) {
- for (int row_iter = 0; row_iter < 4; row_iter++) {
- float amax = 0.0f; // absolute max
- for (int j = 0; j < QK8_0; j++) {
- srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j];
- amax = MAX(amax, fabsf(srcv[row_iter][j]));
- }
- const float d = amax / ((1 << 7) - 1);
- id[row_iter] = d ? 1.0f / d : 0.0f;
- y[i].d[row_iter] = GGML_FP32_TO_FP16(d);
- }
- for (int j = 0; j < QK8_0 * 4; j++) {
- int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave;
- int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave;
- src_offset += (j % blck_size_interleave);
- float x0 = srcv[src_id][src_offset] * id[src_id];
- y[i].qs[j] = roundf(x0);
- }
- }
- #endif
- }
- void quantize_mat_q8_0(const float * restrict x, void * restrict vy, int64_t nrow, int64_t n_per_row, int64_t blck_size_interleave) {
- assert(nrow == 4);
- UNUSED(nrow);
- if (blck_size_interleave == 4) {
- quantize_q8_0_4x4(x, vy, n_per_row);
- } else if (blck_size_interleave == 8) {
- quantize_q8_0_4x8(x, vy, n_per_row);
- } else {
- assert(false);
- }
- }
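- // Minimal usage sketch for the dispatcher above (illustrative only, the
- // buffer names and sizes are made up for this example):
- //   float src[4 * 64];     // 4 source rows of 64 floats each, filled elsewhere
- //   block_q8_0x4 dst[2];   // 64 / QK8_0 = 2 interleaved block groups
- //   quantize_mat_q8_0(src, dst, /*nrow=*/4, /*n_per_row=*/64, /*blck_size_interleave=*/8);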
- static size_t quantize_q4_0_nr_bl(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, int nrows_interleaved, int blck_size_interleave) {
- assert(n_per_row % QK4_0 == 0);
- const int nb = n_per_row / QK4_0;
- void * out_ptr = NULL;
- if (nrows_interleaved == 8) {
- out_ptr = (block_q4_0x8 *) dst;
- }
- else if (nrows_interleaved == 4) {
- out_ptr = (block_q4_0x4 *) dst;
- }
- assert(nrows_interleaved <= 8);
- block_q4_0 dst_tmp[8];
- for (int b = 0; b < (nrow * n_per_row); b += nrows_interleaved * n_per_row) {
- for (int64_t x = 0; x < nb; x++) {
- for (int i = 0; i < nrows_interleaved; i++) {
- quantize_row_q4_0_ref(src + b + i * n_per_row + x * QK4_0, (block_q4_0 *) dst_tmp + i, QK4_0);
- }
- if (nrows_interleaved == 8) {
- *(block_q4_0x8 *) out_ptr = make_block_q4_0x8(dst_tmp, blck_size_interleave, 0x88);
- out_ptr = (block_q4_0x8 *) out_ptr + 1;
- }
- else if (nrows_interleaved == 4) {
- *(block_q4_0x4 *) out_ptr = make_block_q4_0x4(dst_tmp, blck_size_interleave, 0x88);
- out_ptr = (block_q4_0x4 *) out_ptr + 1;
- }
- }
- }
- return ((nrow * n_per_row) / QK4_0 * sizeof(block_q4_0));
- }
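- // The interleaving in quantize_q4_0_nr_bl only permutes bytes, so the size
- // returned above matches plain q4_0: (nrow * n_per_row) / QK4_0 blocks of
- // sizeof(block_q4_0) bytes. Illustrative numbers: 8 rows of 4096 floats give
- // 1024 blocks, i.e. 18 KiB assuming the usual 18-byte block (2-byte fp16
- // delta + 16 packed-nibble bytes).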
- size_t quantize_q4_0_4x4(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
- UNUSED(quant_weights);
- return quantize_q4_0_nr_bl(src, dst, nrow, n_per_row, 4, 4);
- }
- size_t quantize_q4_0_4x8(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
- UNUSED(quant_weights);
- return quantize_q4_0_nr_bl(src, dst, nrow, n_per_row, 4, 8);
- }
- size_t quantize_q4_0_8x8(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
- UNUSED(quant_weights);
- return quantize_q4_0_nr_bl(src, dst, nrow, n_per_row, 8, 8);
- }
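- // Naming convention for the wrappers above: quantize_q4_0_{R}x{B} interleaves
- // R rows with a B-byte interleave block, matching the Q4_0_4_4, Q4_0_4_8 and
- // Q4_0_8_8 formats that the assertions below refer to.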
- void ggml_gemv_q4_0_4x4_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) {
- const int qk = QK8_0;
- const int nb = n / qk;
- const int ncols_interleaved = 4;
- const int blocklen = 4;
- assert (n % qk == 0);
- assert (nc % ncols_interleaved == 0);
- UNUSED(s);
- UNUSED(bs);
- UNUSED(vx);
- UNUSED(vy);
- UNUSED(nr);
- UNUSED(nc);
- UNUSED(nb);
- UNUSED(ncols_interleaved);
- UNUSED(blocklen);
- #if defined(__ARM_FEATURE_SVE)
- if (ggml_sve_cnt_b == QK8_0) {
- GGML_ASSERT(!(ggml_cpu_has_sve() && (ggml_sve_cnt_b == QK8_0)) &&
- "__ARM_FEATURE_SVE defined, use the Q4_0_8_8 quantization format for optimal performance");
- }
- #endif
- #if defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
- GGML_ASSERT(!(ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) &&
- "__ARM_NEON and __ARM_FEATURE_MATMUL_INT8 defined, use the Q4_0_4_8 quantization format for optimal performance");
- #elif defined(__ARM_NEON) && defined(__aarch64__) && ! ((defined(_MSC_VER)) && ! defined(__clang__))
- const void * b_ptr = vx;
- const void * a_ptr = vy;
- float * res_ptr = s;
- __asm__ __volatile__(
- "movi v31.16b, #0x4\n"
- "movi v30.16b, #0xf0\n"
- "add %x[b_ptr], %x[b_ptr], #0x8\n"
- "1:" // Column loop
- "add x22, %x[a_ptr], #0x2\n"
- "movi v29.16b, #0x0\n"
- "mov x21, %x[nb]\n"
- "2:" // Block loop
- "ldr q28, [%x[b_ptr], #0x0]\n"
- "ldr q27, [x22, #0x0]\n"
- "movi v26.4s, #0x0\n"
- "sub x20, x22, #0x2\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q24, [%x[b_ptr], #0x10]\n"
- "sub x21, x21, #0x1\n"
- "add x22, x22, #0x22\n"
- "ldr q23, [%x[b_ptr], #0x20]\n"
- "ldr q22, [%x[b_ptr], #0x30]\n"
- "ld1r { v21.8h }, [x20]\n"
- "ldr q20, [%x[b_ptr], #-0x8]\n"
- "sshl v16.16b, v28.16b, v31.16b\n"
- "and v28.16b, v28.16b, v30.16b\n"
- "sshl v19.16b, v24.16b, v31.16b\n"
- "and v24.16b, v24.16b, v30.16b\n"
- "add %x[b_ptr], %x[b_ptr], #0x48\n"
- "sshl v18.16b, v23.16b, v31.16b\n"
- "and v23.16b, v23.16b, v30.16b\n"
- ".inst 0x4f9be21a // sdot v26.4s, v16.16b, v27.4b[0]\n"
- "sshl v17.16b, v22.16b, v31.16b\n"
- "and v22.16b, v22.16b, v30.16b\n"
- "fcvtl v21.4s, v21.4h\n"
- "fcvtl v16.4s, v20.4h\n"
- ".inst 0x4f99e39a // sdot v26.4s, v28.16b, v25.4b[0]\n"
- "fmul v16.4s, v16.4s, v21.4s\n"
- ".inst 0x4fbbe27a // sdot v26.4s, v19.16b, v27.4b[1]\n"
- ".inst 0x4fb9e31a // sdot v26.4s, v24.16b, v25.4b[1]\n"
- ".inst 0x4f9bea5a // sdot v26.4s, v18.16b, v27.4b[2]\n"
- ".inst 0x4f99eafa // sdot v26.4s, v23.16b, v25.4b[2]\n"
- ".inst 0x4fbbea3a // sdot v26.4s, v17.16b, v27.4b[3]\n"
- ".inst 0x4fb9eada // sdot v26.4s, v22.16b, v25.4b[3]\n"
- "scvtf v26.4s, v26.4s, #0x4\n"
- "fmla v29.4s, v26.4s, v16.4s\n"
- "cbnz x21, 2b\n"
- "sub %x[nc], %x[nc], #0x4\n"
- "str q29, [%x[res_ptr], #0x0]\n"
- "add %x[res_ptr], %x[res_ptr], #0x10\n"
- "cbnz %x[nc], 1b\n"
- : [b_ptr] "+&r" (b_ptr), [res_ptr] "+&r" (res_ptr), [nc] "+&r" (nc)
- : [a_ptr] "r" (a_ptr), [nb] "r" (nb)
- : "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22"
- );
- #else
- float sumf[4];
- int sumi;
- const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb);
- for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0;
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
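- // each packed byte holds two 4-bit quants: "<< 4" moves the low nibble
- // into the top bits and "& 0xF0" keeps the high nibble, so v0 and v1 are
- // the signed quant values scaled by 16; the ">> 4" below removes that
- // factor after the multiply-accumulate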
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4;
- }
- sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d);
- }
- }
- }
- for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j];
- }
- #endif
- }
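- // Reference semantics shared by the GEMV kernels in this file: vy holds one
- // quantized activation row of n elements, vx holds nc interleaved q4_0 weight
- // columns, and s[c] receives the dot product of the row with column c for
- // c in [0, nc); bs is unused in the vector-times-matrix case.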
- void ggml_gemv_q4_0_4x8_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) {
- const int qk = QK8_0;
- const int nb = n / qk;
- const int ncols_interleaved = 4;
- const int blocklen = 8;
- assert (n % qk == 0);
- assert (nc % ncols_interleaved == 0);
- UNUSED(s);
- UNUSED(bs);
- UNUSED(vx);
- UNUSED(vy);
- UNUSED(nr);
- UNUSED(nc);
- UNUSED(nb);
- UNUSED(ncols_interleaved);
- UNUSED(blocklen);
- #if defined(__ARM_FEATURE_SVE)
- if (ggml_sve_cnt_b == QK8_0) {
- GGML_ASSERT(!(ggml_cpu_has_sve() && (ggml_sve_cnt_b == QK8_0)) &&
- "__ARM_FEATURE_SVE defined, use the Q4_0_8_8 quantization format for optimal performance");
- }
- #endif
- #if defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) && ! ((defined(_MSC_VER)) && ! defined(__clang__))
- const void * b_ptr = vx;
- const void * a_ptr = vy;
- float * res_ptr = s;
- __asm__ __volatile__(
- "movi v2.16b, #0x4\n"
- "movi v1.16b, #0xf0\n"
- "add %x[b_ptr], %x[b_ptr], #0x8\n"
- "1:" // Column loop
- "add x23, %x[a_ptr], #0x2\n"
- "movi v0.16b, #0x0\n"
- "mov x22, %x[nb]\n"
- "2:" // Block loop
- "ldr q31, [%x[b_ptr], #0x0]\n"
- "ldr q30, [%x[b_ptr], #0x10]\n"
- "mov x21, x23\n"
- "movi v29.4s, #0x0\n"
- "ldr q28, [%x[b_ptr], #0x20]\n"
- "ldr q27, [%x[b_ptr], #0x30]\n"
- "movi v26.4s, #0x0\n"
- "sub x20, x23, #0x2\n"
- "ld1r { v25.8h }, [x20]\n"
- "ldr q24, [%x[b_ptr], #-0x8]\n"
- "sub x22, x22, #0x1\n"
- "add x23, x23, #0x22\n"
- "ld1r { v23.2d }, [x21], #0x8\n"
- "sshl v22.16b, v31.16b, v2.16b\n"
- "sshl v16.16b, v30.16b, v2.16b\n"
- "add %x[b_ptr], %x[b_ptr], #0x48\n"
- "ld1r { v21.2d }, [x21], #0x8\n"
- "sshl v20.16b, v28.16b, v2.16b\n"
- "sshl v19.16b, v27.16b, v2.16b\n"
- "ld1r { v18.2d }, [x21], #0x8\n"
- "ld1r { v17.2d }, [x21], #0x8\n"
- "and v31.16b, v31.16b, v1.16b\n"
- "and v30.16b, v30.16b, v1.16b\n"
- ".inst 0x4e9796dd // sdot v29.4s, v22.16b, v23.16b\n"
- ".inst 0x4e97961a // sdot v26.4s, v16.16b, v23.16b\n"
- "and v28.16b, v28.16b, v1.16b\n"
- "and v27.16b, v27.16b, v1.16b\n"
- "fcvtl v25.4s, v25.4h\n"
- "fcvtl v16.4s, v24.4h\n"
- ".inst 0x4e95969d // sdot v29.4s, v20.16b, v21.16b\n"
- ".inst 0x4e95967a // sdot v26.4s, v19.16b, v21.16b\n"
- "fmul v16.4s, v16.4s, v25.4s\n"
- ".inst 0x4e9297fd // sdot v29.4s, v31.16b, v18.16b\n"
- ".inst 0x4e9297da // sdot v26.4s, v30.16b, v18.16b\n"
- ".inst 0x4e91979d // sdot v29.4s, v28.16b, v17.16b\n"
- ".inst 0x4e91977a // sdot v26.4s, v27.16b, v17.16b\n"
- "addp v29.4s, v29.4s, v26.4s\n"
- "scvtf v29.4s, v29.4s, #0x4\n"
- "fmla v0.4s, v29.4s, v16.4s\n"
- "cbnz x22, 2b\n"
- "sub %x[nc], %x[nc], #0x4\n"
- "str q0, [%x[res_ptr], #0x0]\n"
- "add %x[res_ptr], %x[res_ptr], #0x10\n"
- "cbnz %x[nc], 1b\n"
- : [b_ptr] "+&r" (b_ptr), [res_ptr] "+&r" (res_ptr), [nc] "+&r" (nc)
- : [a_ptr] "r" (a_ptr), [nb] "r" (nb)
- : "memory", "v0", "v1", "v2", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23"
- );
- #elif defined(__ARM_NEON) && defined(__aarch64__)
- GGML_ASSERT((ggml_cpu_has_sve() || ggml_cpu_has_matmul_int8()) &&
- "__ARM_FEATURE_SVE and __ARM_FEATURE_MATMUL_INT8 not defined, use the Q4_0_4_4 quantization format for optimal "
- "performance");
- #else
- float sumf[4];
- int sumi;
- const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb);
- for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0;
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4;
- }
- sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d);
- }
- }
- }
- for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j];
- }
- #endif
- }
- void ggml_gemv_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) {
- const int qk = QK8_0;
- const int nb = n / qk;
- const int ncols_interleaved = 8;
- const int blocklen = 8;
- assert (n % qk == 0);
- assert (nc % ncols_interleaved == 0);
- UNUSED(s);
- UNUSED(bs);
- UNUSED(vx);
- UNUSED(vy);
- UNUSED(nr);
- UNUSED(nc);
- UNUSED(nb);
- UNUSED(ncols_interleaved);
- UNUSED(blocklen);
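- // Dispatch note: ggml_sve_cnt_b is the SVE vector length in bytes, so the
- // test below (ggml_sve_cnt_b == QK8_0, i.e. 32 bytes) selects the 256-bit
- // SVE kernel; other configurations fall through to the advisory assertions
- // or to the scalar loop at the end.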
- #if defined(__ARM_FEATURE_SVE) && ! ((defined(_MSC_VER)) && ! defined(__clang__))
- if (ggml_sve_cnt_b == QK8_0) {
- const void * b_ptr = vx;
- const void * a_ptr = vy;
- float * res_ptr = s;
- __asm__ __volatile__(
- "ptrue p0.b\n"
- "add %x[b_ptr], %x[b_ptr], #0x10\n"
- "1:" // Column loop
- "add x22, %x[a_ptr], #0x2\n"
- "mov z31.b, #0x0\n"
- "mov x21, %x[nb]\n"
- "2:" // Block loop
- "ld1b { z30.b }, p0/Z, [%x[b_ptr]]\n"
- "ld1b { z29.b }, p0/Z, [%x[b_ptr], #1, MUL VL]\n"
- "mov z28.s, #0x0\n"
- "mov z27.s, #0x0\n"
- "ld1rd { z26.d }, p0/Z, [x22]\n"
- "ld1b { z25.b }, p0/Z, [%x[b_ptr], #2, MUL VL]\n"
- "sub x20, x22, #0x2\n"
- "sub x21, x21, #0x1\n"
- "ld1b { z24.b }, p0/Z, [%x[b_ptr], #3, MUL VL]\n"
- "ld1rd { z23.d }, p0/Z, [x22, #8]\n"
- "lsl z22.b, z30.b, #0x4\n"
- "lsl z16.b, z29.b, #0x4\n"
- "and z30.b, z30.b, #0xf0\n"
- "and z29.b, z29.b, #0xf0\n"
- "ld1rd { z21.d }, p0/Z, [x22, #16]\n"
- "ld1rd { z20.d }, p0/Z, [x22, #24]\n"
- "lsl z19.b, z25.b, #0x4\n"
- "and z25.b, z25.b, #0xf0\n"
- "ld1rh { z17.h }, p0/Z, [x20]\n"
- "ld1h { z18.s }, p0/Z, [%x[b_ptr], #-1, MUL VL]\n"
- "sdot z28.s, z22.b, z26.b\n"
- "sdot z27.s, z16.b, z26.b\n"
- "lsl z16.b, z24.b, #0x4\n"
- "add x22, x22, #0x22\n"
- "and z24.b, z24.b, #0xf0\n"
- "add %x[b_ptr], %x[b_ptr], #0x90\n"
- "fcvt z17.s, p0/m, z17.h\n"
- "fcvt z18.s, p0/m, z18.h\n"
- "sdot z28.s, z19.b, z23.b\n"
- "sdot z27.s, z16.b, z23.b\n"
- "fmul z18.s, z18.s, z17.s\n"
- "sdot z28.s, z30.b, z21.b\n"
- "sdot z27.s, z29.b, z21.b\n"
- "sdot z28.s, z25.b, z20.b\n"
- "sdot z27.s, z24.b, z20.b\n"
- "uzp1 z17.s, z28.s, z27.s\n"
- "uzp2 z16.s, z28.s, z27.s\n"
- "add z17.s, z17.s, z16.s\n"
- "asr z17.s, z17.s, #0x4\n"
- "scvtf z17.s, p0/m, z17.s\n"
- "fmla z31.s, p0/M, z17.s, z18.s\n"
- "cbnz x21, 2b\n"
- "sub %x[nc], %x[nc], #0x8\n"
- "st1w { z31.s }, p0, [%x[res_ptr]]\n"
- "add %x[res_ptr], %x[res_ptr], #0x20\n"
- "cbnz %x[nc], 1b\n"
- : [b_ptr] "+&r" (b_ptr), [res_ptr] "+&r" (res_ptr), [nc] "+&r" (nc)
- : [a_ptr] "r" (a_ptr), [nb] "r" (nb)
- : "memory", "p0", "x20", "x21", "x22", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
- );
- return;
- }
- else if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) {
- GGML_ASSERT((ggml_cpu_has_sve() && (ggml_sve_cnt_b == QK8_0)) &&
- "__ARM_FEATURE_SVE for vector size of 256-bits not defined, use the Q4_0_4_8 quantization format for optimal "
- "performance");
- }
- else if (ggml_cpu_has_neon()) {
- GGML_ASSERT(((ggml_cpu_has_sve() && (ggml_sve_cnt_b == QK8_0)) || ggml_cpu_has_matmul_int8()) &&
- "__ARM_FEATURE_SVE for vector size of 256-bits and __ARM_FEATURE_MATMUL_INT8 not defined, use the Q4_0_4_4 "
- "quantization format for optimal performance");
- }
- #endif
- #if defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
- GGML_ASSERT(ggml_cpu_has_sve() &&
- "__ARM_FEATURE_SVE not defined, use the Q4_0_4_8 quantization format for optimal performance");
- #elif defined(__ARM_NEON) && defined(__aarch64__)
- GGML_ASSERT((ggml_cpu_has_sve() || ggml_cpu_has_matmul_int8()) &&
- "__ARM_FEATURE_SVE and __ARM_FEATURE_MATMUL_INT8 not defined, use the Q4_0_4_4 quantization format for optimal "
- "performance");
- #else
- float sumf[8];
- int sumi;
- const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb);
- for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0;
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4;
- }
- sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d);
- }
- }
- }
- for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j];
- }
- #endif
- }
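- // The GEMM variants below extend the same math to nr x nc tiles: vy now holds
- // nr quantized rows (grouped 4 at a time as block_q8_0x4) and the results are
- // written with a row stride of bs floats.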
- void ggml_gemm_q4_0_4x4_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) {
- const int qk = QK8_0;
- const int nb = n / qk;
- const int ncols_interleaved = 4;
- const int blocklen = 4;
- assert (n % qk == 0);
- assert (nr % 4 == 0);
- assert (nc % ncols_interleaved == 0);
- UNUSED(s);
- UNUSED(bs);
- UNUSED(vx);
- UNUSED(vy);
- UNUSED(nr);
- UNUSED(nc);
- UNUSED(nb);
- UNUSED(ncols_interleaved);
- UNUSED(blocklen);
- #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8)
- if (ggml_sve_cnt_b == QK8_0) {
- GGML_ASSERT(!(ggml_cpu_has_sve() && (ggml_sve_cnt_b == QK8_0)) &&
- "__ARM_FEATURE_SVE defined, use the Q4_0_8_8 quantization format for optimal performance");
- }
- #endif
- #if defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
- GGML_ASSERT(!(ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) &&
- "__ARM_NEON and __ARM_FEATURE_MATMUL_INT8 defined, use the Q4_0_4_8 quantization format for optimal performance");
- #elif defined(__ARM_NEON) && defined(__aarch64__) && ! ((defined(_MSC_VER)) && ! defined(__clang__))
- const void * b_ptr = vx;
- const void * a_ptr = vy;
- float * res_ptr = s;
- size_t res_stride = bs * sizeof(float);
- __asm__ __volatile__(
- "mov x10, %x[nr]\n"
- "mov x9, #0x88\n"
- "cmp x10, #0x10\n"
- "mul x9, %x[nb], x9\n"
- "blt 4f\n"
- "1:" // Row loop
- "add x28, %x[b_ptr], #0x8\n"
- "mov x27, %x[nc]\n"
- "add x26, %x[res_ptr], %x[res_stride], LSL #4\n"
- "2:" // Column loop
- "add x25, %x[a_ptr], #0x8\n"
- "movi v15.16b, #0x0\n"
- "movi v19.16b, #0x0\n"
- "mov x24, %x[nb]\n"
- "add x23, x25, x9\n"
- "movi v18.16b, #0x0\n"
- "movi v14.16b, #0x0\n"
- "add x22, x23, x9\n"
- "movi v11.16b, #0x0\n"
- "movi v13.16b, #0x0\n"
- "add x21, x22, x9\n"
- "movi v23.16b, #0x0\n"
- "movi v16.16b, #0x0\n"
- "movi v25.16b, #0x0\n"
- "movi v7.16b, #0x0\n"
- "movi v0.16b, #0x0\n"
- "movi v4.16b, #0x0\n"
- "movi v5.16b, #0x0\n"
- "movi v21.16b, #0x0\n"
- "movi v8.16b, #0x0\n"
- "movi v1.16b, #0x0\n"
- "3:" // Block loop
- "ldr q3, [x28, #0x0]\n"
- "ldr q31, [x25, #0x0]\n"
- "movi v28.16b, #0x4\n"
- "movi v10.4s, #0x0\n"
- "ldr q22, [x28, #0x10]\n"
- "ldr q6, [x25, #0x10]\n"
- "movi v29.4s, #0x0\n"
- "movi v9.4s, #0x0\n"
- "ldr q27, [x28, #0x20]\n"
- "ldr q30, [x28, #0x30]\n"
- "movi v20.4s, #0x0\n"
- "movi v24.16b, #0xf0\n"
- "ldr d2, [x25, #-0x8]\n"
- "ldr d26, [x23, #-0x8]\n"
- "sshl v12.16b, v3.16b, v28.16b\n"
- "sub x20, x28, #0x8\n"
- "ldr d17, [x20, #0x0]\n"
- "and v3.16b, v3.16b, v24.16b\n"
- "subs x24, x24, #0x1\n"
- "add x28, x28, #0x48\n"
- ".inst 0x4f9fe18a // sdot v10.4s, v12.16b, v31.4b[0]\n"
- ".inst 0x4fbfe19d // sdot v29.4s, v12.16b, v31.4b[1]\n"
- ".inst 0x4f9fe989 // sdot v9.4s, v12.16b, v31.4b[2]\n"
- ".inst 0x4fbfe994 // sdot v20.4s, v12.16b, v31.4b[3]\n"
- "sshl v31.16b, v22.16b, v28.16b\n"
- "and v22.16b, v22.16b, v24.16b\n"
- "fcvtl v17.4s, v17.4h\n"
- "fcvtl v2.4s, v2.4h\n"
- "fcvtl v26.4s, v26.4h\n"
- ".inst 0x4f86e3ea // sdot v10.4s, v31.16b, v6.4b[0]\n"
- ".inst 0x4fa6e3fd // sdot v29.4s, v31.16b, v6.4b[1]\n"
- ".inst 0x4f86ebe9 // sdot v9.4s, v31.16b, v6.4b[2]\n"
- ".inst 0x4fa6ebf4 // sdot v20.4s, v31.16b, v6.4b[3]\n"
- "sshl v6.16b, v27.16b, v28.16b\n"
- "sshl v28.16b, v30.16b, v28.16b\n"
- "and v27.16b, v27.16b, v24.16b\n"
- "and v30.16b, v30.16b, v24.16b\n"
- "ldr q24, [x25, #0x20]\n"
- ".inst 0x4f98e0ca // sdot v10.4s, v6.16b, v24.4b[0]\n"
- ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n"
- ".inst 0x4f98e8c9 // sdot v9.4s, v6.16b, v24.4b[2]\n"
- ".inst 0x4fb8e8d4 // sdot v20.4s, v6.16b, v24.4b[3]\n"
- "ldr q24, [x25, #0x30]\n"
- ".inst 0x4f98e38a // sdot v10.4s, v28.16b, v24.4b[0]\n"
- ".inst 0x4fb8e39d // sdot v29.4s, v28.16b, v24.4b[1]\n"
- ".inst 0x4f98eb89 // sdot v9.4s, v28.16b, v24.4b[2]\n"
- ".inst 0x4fb8eb94 // sdot v20.4s, v28.16b, v24.4b[3]\n"
- "ldr q24, [x25, #0x40]\n"
- ".inst 0x4f98e06a // sdot v10.4s, v3.16b, v24.4b[0]\n"
- ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n"
- ".inst 0x4f98e869 // sdot v9.4s, v3.16b, v24.4b[2]\n"
- ".inst 0x4fb8e874 // sdot v20.4s, v3.16b, v24.4b[3]\n"
- "ldr q24, [x25, #0x50]\n"
- ".inst 0x4f98e2ca // sdot v10.4s, v22.16b, v24.4b[0]\n"
- ".inst 0x4fb8e2dd // sdot v29.4s, v22.16b, v24.4b[1]\n"
- ".inst 0x4f98eac9 // sdot v9.4s, v22.16b, v24.4b[2]\n"
- ".inst 0x4fb8ead4 // sdot v20.4s, v22.16b, v24.4b[3]\n"
- "ldr q24, [x25, #0x60]\n"
- ".inst 0x4f98e36a // sdot v10.4s, v27.16b, v24.4b[0]\n"
- ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n"
- ".inst 0x4f98eb69 // sdot v9.4s, v27.16b, v24.4b[2]\n"
- ".inst 0x4fb8eb74 // sdot v20.4s, v27.16b, v24.4b[3]\n"
- "ldr q24, [x25, #0x70]\n"
- "add x25, x25, #0x88\n"
- ".inst 0x4f98e3ca // sdot v10.4s, v30.16b, v24.4b[0]\n"
- ".inst 0x4fb8e3dd // sdot v29.4s, v30.16b, v24.4b[1]\n"
- ".inst 0x4f98ebc9 // sdot v9.4s, v30.16b, v24.4b[2]\n"
- ".inst 0x4fb8ebd4 // sdot v20.4s, v30.16b, v24.4b[3]\n"
- "fmul v24.4s, v17.4s, v2.s[0]\n"
- "scvtf v10.4s, v10.4s, #0x4\n"
- "scvtf v29.4s, v29.4s, #0x4\n"
- "scvtf v9.4s, v9.4s, #0x4\n"
- "scvtf v20.4s, v20.4s, #0x4\n"
- "fmla v15.4s, v10.4s, v24.4s\n"
- "ldr q24, [x23, #0x0]\n"
- "fmul v10.4s, v17.4s, v2.s[1]\n"
- "fmla v19.4s, v29.4s, v10.4s\n"
- "ldr q10, [x23, #0x10]\n"
- "fmul v29.4s, v17.4s, v2.s[2]\n"
- "fmul v2.4s, v17.4s, v2.s[3]\n"
- "fmla v18.4s, v9.4s, v29.4s\n"
- "movi v9.4s, #0x0\n"
- "movi v29.4s, #0x0\n"
- ".inst 0x4f98e189 // sdot v9.4s, v12.16b, v24.4b[0]\n"
- ".inst 0x4fb8e19d // sdot v29.4s, v12.16b, v24.4b[1]\n"
- "fmla v14.4s, v20.4s, v2.4s\n"
- "movi v20.4s, #0x0\n"
- "movi v2.4s, #0x0\n"
- ".inst 0x4f98e994 // sdot v20.4s, v12.16b, v24.4b[2]\n"
- ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n"
- "ldr q24, [x23, #0x20]\n"
- ".inst 0x4f8ae3e9 // sdot v9.4s, v31.16b, v10.4b[0]\n"
- ".inst 0x4faae3fd // sdot v29.4s, v31.16b, v10.4b[1]\n"
- ".inst 0x4f8aebf4 // sdot v20.4s, v31.16b, v10.4b[2]\n"
- ".inst 0x4faaebe2 // sdot v2.4s, v31.16b, v10.4b[3]\n"
- "ldr q10, [x23, #0x30]\n"
- ".inst 0x4f98e0c9 // sdot v9.4s, v6.16b, v24.4b[0]\n"
- ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n"
- ".inst 0x4f98e8d4 // sdot v20.4s, v6.16b, v24.4b[2]\n"
- ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n"
- "ldr q24, [x23, #0x40]\n"
- ".inst 0x4f8ae389 // sdot v9.4s, v28.16b, v10.4b[0]\n"
- ".inst 0x4faae39d // sdot v29.4s, v28.16b, v10.4b[1]\n"
- ".inst 0x4f8aeb94 // sdot v20.4s, v28.16b, v10.4b[2]\n"
- ".inst 0x4faaeb82 // sdot v2.4s, v28.16b, v10.4b[3]\n"
- "ldr q10, [x23, #0x50]\n"
- ".inst 0x4f98e069 // sdot v9.4s, v3.16b, v24.4b[0]\n"
- ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n"
- ".inst 0x4f98e874 // sdot v20.4s, v3.16b, v24.4b[2]\n"
- ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n"
- "ldr q24, [x23, #0x60]\n"
- ".inst 0x4f8ae2c9 // sdot v9.4s, v22.16b, v10.4b[0]\n"
- ".inst 0x4faae2dd // sdot v29.4s, v22.16b, v10.4b[1]\n"
- ".inst 0x4f8aead4 // sdot v20.4s, v22.16b, v10.4b[2]\n"
- ".inst 0x4faaeac2 // sdot v2.4s, v22.16b, v10.4b[3]\n"
- "ldr q10, [x23, #0x70]\n"
- "add x23, x23, #0x88\n"
- ".inst 0x4f98e369 // sdot v9.4s, v27.16b, v24.4b[0]\n"
- ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n"
- ".inst 0x4f98eb74 // sdot v20.4s, v27.16b, v24.4b[2]\n"
- ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n"
- "ldr q24, [x22, #0x0]\n"
- ".inst 0x4f8ae3c9 // sdot v9.4s, v30.16b, v10.4b[0]\n"
- ".inst 0x4faae3dd // sdot v29.4s, v30.16b, v10.4b[1]\n"
- ".inst 0x4f8aebd4 // sdot v20.4s, v30.16b, v10.4b[2]\n"
- ".inst 0x4faaebc2 // sdot v2.4s, v30.16b, v10.4b[3]\n"
- "fmul v10.4s, v17.4s, v26.s[0]\n"
- "scvtf v9.4s, v9.4s, #0x4\n"
- "scvtf v29.4s, v29.4s, #0x4\n"
- "scvtf v20.4s, v20.4s, #0x4\n"
- "scvtf v2.4s, v2.4s, #0x4\n"
- "fmla v11.4s, v9.4s, v10.4s\n"
- "ldr q9, [x22, #0x10]\n"
- "fmul v10.4s, v17.4s, v26.s[1]\n"
- "fmla v13.4s, v29.4s, v10.4s\n"
- "ldr d29, [x22, #-0x8]\n"
- "fmul v10.4s, v17.4s, v26.s[2]\n"
- "fmul v26.4s, v17.4s, v26.s[3]\n"
- "fcvtl v29.4s, v29.4h\n"
- "fmla v23.4s, v20.4s, v10.4s\n"
- "movi v20.4s, #0x0\n"
- "movi v10.4s, #0x0\n"
- "fmla v16.4s, v2.4s, v26.4s\n"
- "movi v26.4s, #0x0\n"
- "movi v2.4s, #0x0\n"
- ".inst 0x4f98e194 // sdot v20.4s, v12.16b, v24.4b[0]\n"
- ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n"
- ".inst 0x4f98e99a // sdot v26.4s, v12.16b, v24.4b[2]\n"
- ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n"
- "ldr q24, [x22, #0x20]\n"
- ".inst 0x4f89e3f4 // sdot v20.4s, v31.16b, v9.4b[0]\n"
- ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n"
- ".inst 0x4f89ebfa // sdot v26.4s, v31.16b, v9.4b[2]\n"
- ".inst 0x4fa9ebe2 // sdot v2.4s, v31.16b, v9.4b[3]\n"
- "ldr q9, [x22, #0x30]\n"
- ".inst 0x4f98e0d4 // sdot v20.4s, v6.16b, v24.4b[0]\n"
- ".inst 0x4fb8e0ca // sdot v10.4s, v6.16b, v24.4b[1]\n"
- ".inst 0x4f98e8da // sdot v26.4s, v6.16b, v24.4b[2]\n"
- ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n"
- "ldr q24, [x22, #0x40]\n"
- ".inst 0x4f89e394 // sdot v20.4s, v28.16b, v9.4b[0]\n"
- ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n"
- ".inst 0x4f89eb9a // sdot v26.4s, v28.16b, v9.4b[2]\n"
- ".inst 0x4fa9eb82 // sdot v2.4s, v28.16b, v9.4b[3]\n"
- "ldr q9, [x22, #0x50]\n"
- ".inst 0x4f98e074 // sdot v20.4s, v3.16b, v24.4b[0]\n"
- ".inst 0x4fb8e06a // sdot v10.4s, v3.16b, v24.4b[1]\n"
- ".inst 0x4f98e87a // sdot v26.4s, v3.16b, v24.4b[2]\n"
- ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n"
- "ldr q24, [x22, #0x60]\n"
- ".inst 0x4f89e2d4 // sdot v20.4s, v22.16b, v9.4b[0]\n"
- ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n"
- ".inst 0x4f89eada // sdot v26.4s, v22.16b, v9.4b[2]\n"
- ".inst 0x4fa9eac2 // sdot v2.4s, v22.16b, v9.4b[3]\n"
- "ldr q9, [x22, #0x70]\n"
- "add x22, x22, #0x88\n"
- ".inst 0x4f98e374 // sdot v20.4s, v27.16b, v24.4b[0]\n"
- ".inst 0x4fb8e36a // sdot v10.4s, v27.16b, v24.4b[1]\n"
- ".inst 0x4f98eb7a // sdot v26.4s, v27.16b, v24.4b[2]\n"
- ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n"
- "ldr q24, [x21, #0x0]\n"
- ".inst 0x4f89e3d4 // sdot v20.4s, v30.16b, v9.4b[0]\n"
- ".inst 0x4fa9e3ca // sdot v10.4s, v30.16b, v9.4b[1]\n"
- ".inst 0x4f89ebda // sdot v26.4s, v30.16b, v9.4b[2]\n"
- ".inst 0x4fa9ebc2 // sdot v2.4s, v30.16b, v9.4b[3]\n"
- "fmul v9.4s, v17.4s, v29.s[0]\n"
- "scvtf v20.4s, v20.4s, #0x4\n"
- "scvtf v10.4s, v10.4s, #0x4\n"
- "scvtf v26.4s, v26.4s, #0x4\n"
- "scvtf v2.4s, v2.4s, #0x4\n"
- "fmla v25.4s, v20.4s, v9.4s\n"
- "ldr q9, [x21, #0x10]\n"
- "fmul v20.4s, v17.4s, v29.s[1]\n"
- "fmla v7.4s, v10.4s, v20.4s\n"
- "ldr d20, [x21, #-0x8]\n"
- "fmul v10.4s, v17.4s, v29.s[2]\n"
- "fmul v29.4s, v17.4s, v29.s[3]\n"
- "fcvtl v20.4s, v20.4h\n"
- "fmla v0.4s, v26.4s, v10.4s\n"
- "movi v26.4s, #0x0\n"
- "movi v10.4s, #0x0\n"
- "fmla v4.4s, v2.4s, v29.4s\n"
- "movi v2.4s, #0x0\n"
- "movi v29.4s, #0x0\n"
- ".inst 0x4f98e19a // sdot v26.4s, v12.16b, v24.4b[0]\n"
- ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n"
- ".inst 0x4f98e982 // sdot v2.4s, v12.16b, v24.4b[2]\n"
- ".inst 0x4fb8e99d // sdot v29.4s, v12.16b, v24.4b[3]\n"
- "ldr q12, [x21, #0x20]\n"
- "fmul v24.4s, v17.4s, v20.s[0]\n"
- ".inst 0x4f89e3fa // sdot v26.4s, v31.16b, v9.4b[0]\n"
- ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n"
- ".inst 0x4f89ebe2 // sdot v2.4s, v31.16b, v9.4b[2]\n"
- ".inst 0x4fa9ebfd // sdot v29.4s, v31.16b, v9.4b[3]\n"
- "ldr q9, [x21, #0x30]\n"
- "fmul v31.4s, v17.4s, v20.s[1]\n"
- ".inst 0x4f8ce0da // sdot v26.4s, v6.16b, v12.4b[0]\n"
- ".inst 0x4face0ca // sdot v10.4s, v6.16b, v12.4b[1]\n"
- ".inst 0x4f8ce8c2 // sdot v2.4s, v6.16b, v12.4b[2]\n"
- ".inst 0x4face8dd // sdot v29.4s, v6.16b, v12.4b[3]\n"
- "ldr q12, [x21, #0x40]\n"
- "fmul v6.4s, v17.4s, v20.s[2]\n"
- "fmul v20.4s, v17.4s, v20.s[3]\n"
- ".inst 0x4f89e39a // sdot v26.4s, v28.16b, v9.4b[0]\n"
- ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n"
- ".inst 0x4f89eb82 // sdot v2.4s, v28.16b, v9.4b[2]\n"
- ".inst 0x4fa9eb9d // sdot v29.4s, v28.16b, v9.4b[3]\n"
- "ldr q9, [x21, #0x50]\n"
- ".inst 0x4f8ce07a // sdot v26.4s, v3.16b, v12.4b[0]\n"
- ".inst 0x4face06a // sdot v10.4s, v3.16b, v12.4b[1]\n"
- ".inst 0x4f8ce862 // sdot v2.4s, v3.16b, v12.4b[2]\n"
- ".inst 0x4face87d // sdot v29.4s, v3.16b, v12.4b[3]\n"
- "ldr q12, [x21, #0x60]\n"
- ".inst 0x4f89e2da // sdot v26.4s, v22.16b, v9.4b[0]\n"
- ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n"
- ".inst 0x4f89eac2 // sdot v2.4s, v22.16b, v9.4b[2]\n"
- ".inst 0x4fa9eadd // sdot v29.4s, v22.16b, v9.4b[3]\n"
- "ldr q17, [x21, #0x70]\n"
- "add x21, x21, #0x88\n"
- ".inst 0x4f8ce37a // sdot v26.4s, v27.16b, v12.4b[0]\n"
- ".inst 0x4face36a // sdot v10.4s, v27.16b, v12.4b[1]\n"
- ".inst 0x4f8ceb62 // sdot v2.4s, v27.16b, v12.4b[2]\n"
- ".inst 0x4faceb7d // sdot v29.4s, v27.16b, v12.4b[3]\n"
- ".inst 0x4f91e3da // sdot v26.4s, v30.16b, v17.4b[0]\n"
- ".inst 0x4fb1e3ca // sdot v10.4s, v30.16b, v17.4b[1]\n"
- ".inst 0x4f91ebc2 // sdot v2.4s, v30.16b, v17.4b[2]\n"
- ".inst 0x4fb1ebdd // sdot v29.4s, v30.16b, v17.4b[3]\n"
- "scvtf v26.4s, v26.4s, #0x4\n"
- "scvtf v10.4s, v10.4s, #0x4\n"
- "fmla v5.4s, v26.4s, v24.4s\n"
- "scvtf v2.4s, v2.4s, #0x4\n"
- "scvtf v29.4s, v29.4s, #0x4\n"
- "fmla v21.4s, v10.4s, v31.4s\n"
- "fmla v8.4s, v2.4s, v6.4s\n"
- "fmla v1.4s, v29.4s, v20.4s\n"
- "bgt 3b\n"
- "mov x20, %x[res_ptr]\n"
- "subs x27, x27, #0x4\n"
- "add %x[res_ptr], %x[res_ptr], #0x10\n"
- "str q15, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q19, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q18, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q14, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q11, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q13, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q23, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q16, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q25, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q7, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q0, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q4, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q5, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q21, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q8, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q1, [x20, #0x0]\n"
- "bne 2b\n"
- "mov x20, #0x4\n"
- "sub x10, x10, #0x10\n"
- "cmp x10, #0x10\n"
- "mov %x[res_ptr], x26\n"
- "madd %x[a_ptr], x20, x9, %x[a_ptr]\n"
- "bge 1b\n"
- "4:" // Row loop skip
- "cbz x10, 9f\n"
- "5:" // Row tail: Row loop
- "add x24, %x[b_ptr], #0x8\n"
- "mov x23, %x[nc]\n"
- "add x22, %x[res_ptr], %x[res_stride], LSL #2\n"
- "6:" // Row tail: Column loop
- "movi v15.16b, #0x0\n"
- "movi v19.16b, #0x0\n"
- "add x25, %x[a_ptr], #0x8\n"
- "mov x21, %x[nb]\n"
- "movi v18.16b, #0x0\n"
- "movi v14.16b, #0x0\n"
- "7:" // Row tail: Block loop
- "ldr q7, [x24, #0x0]\n"
- "ldr q5, [x25, #0x0]\n"
- "movi v9.16b, #0x4\n"
- "movi v4.4s, #0x0\n"
- "ldr q3, [x24, #0x10]\n"
- "ldr q2, [x25, #0x10]\n"
- "movi v1.4s, #0x0\n"
- "movi v0.4s, #0x0\n"
- "ldr q13, [x24, #0x20]\n"
- "ldr q31, [x25, #0x20]\n"
- "movi v30.4s, #0x0\n"
- "movi v29.16b, #0xf0\n"
- "ldr q28, [x24, #0x30]\n"
- "ldr q27, [x25, #0x30]\n"
- "sshl v20.16b, v7.16b, v9.16b\n"
- "sub x20, x24, #0x8\n"
- "ldr q26, [x25, #0x40]\n"
- "ldr q25, [x25, #0x50]\n"
- "sshl v17.16b, v3.16b, v9.16b\n"
- "and v7.16b, v7.16b, v29.16b\n"
- "ldr q24, [x25, #0x60]\n"
- "ldr q16, [x25, #0x70]\n"
- "sshl v22.16b, v13.16b, v9.16b\n"
- "and v3.16b, v3.16b, v29.16b\n"
- "ldr d21, [x20, #0x0]\n"
- "ldr d12, [x25, #-0x8]\n"
- ".inst 0x4f85e284 // sdot v4.4s, v20.16b, v5.4b[0]\n"
- ".inst 0x4fa5e281 // sdot v1.4s, v20.16b, v5.4b[1]\n"
- ".inst 0x4f85ea80 // sdot v0.4s, v20.16b, v5.4b[2]\n"
- ".inst 0x4fa5ea9e // sdot v30.4s, v20.16b, v5.4b[3]\n"
- "sshl v9.16b, v28.16b, v9.16b\n"
- "subs x21, x21, #0x1\n"
- "and v13.16b, v13.16b, v29.16b\n"
- "and v28.16b, v28.16b, v29.16b\n"
- "add x25, x25, #0x88\n"
- "add x24, x24, #0x48\n"
- "fcvtl v21.4s, v21.4h\n"
- "fcvtl v12.4s, v12.4h\n"
- ".inst 0x4f82e224 // sdot v4.4s, v17.16b, v2.4b[0]\n"
- ".inst 0x4fa2e221 // sdot v1.4s, v17.16b, v2.4b[1]\n"
- ".inst 0x4f82ea20 // sdot v0.4s, v17.16b, v2.4b[2]\n"
- ".inst 0x4fa2ea3e // sdot v30.4s, v17.16b, v2.4b[3]\n"
- "fmul v11.4s, v21.4s, v12.s[0]\n"
- "fmul v23.4s, v21.4s, v12.s[1]\n"
- "fmul v17.4s, v21.4s, v12.s[2]\n"
- ".inst 0x4f9fe2c4 // sdot v4.4s, v22.16b, v31.4b[0]\n"
- "fmul v6.4s, v21.4s, v12.s[3]\n"
- ".inst 0x4fbfe2c1 // sdot v1.4s, v22.16b, v31.4b[1]\n"
- ".inst 0x4f9feac0 // sdot v0.4s, v22.16b, v31.4b[2]\n"
- ".inst 0x4fbfeade // sdot v30.4s, v22.16b, v31.4b[3]\n"
- ".inst 0x4f9be124 // sdot v4.4s, v9.16b, v27.4b[0]\n"
- ".inst 0x4fbbe121 // sdot v1.4s, v9.16b, v27.4b[1]\n"
- ".inst 0x4f9be920 // sdot v0.4s, v9.16b, v27.4b[2]\n"
- ".inst 0x4fbbe93e // sdot v30.4s, v9.16b, v27.4b[3]\n"
- ".inst 0x4f9ae0e4 // sdot v4.4s, v7.16b, v26.4b[0]\n"
- ".inst 0x4fbae0e1 // sdot v1.4s, v7.16b, v26.4b[1]\n"
- ".inst 0x4f9ae8e0 // sdot v0.4s, v7.16b, v26.4b[2]\n"
- ".inst 0x4fbae8fe // sdot v30.4s, v7.16b, v26.4b[3]\n"
- ".inst 0x4f99e064 // sdot v4.4s, v3.16b, v25.4b[0]\n"
- ".inst 0x4fb9e061 // sdot v1.4s, v3.16b, v25.4b[1]\n"
- ".inst 0x4f99e860 // sdot v0.4s, v3.16b, v25.4b[2]\n"
- ".inst 0x4fb9e87e // sdot v30.4s, v3.16b, v25.4b[3]\n"
- ".inst 0x4f98e1a4 // sdot v4.4s, v13.16b, v24.4b[0]\n"
- ".inst 0x4fb8e1a1 // sdot v1.4s, v13.16b, v24.4b[1]\n"
- ".inst 0x4f98e9a0 // sdot v0.4s, v13.16b, v24.4b[2]\n"
- ".inst 0x4fb8e9be // sdot v30.4s, v13.16b, v24.4b[3]\n"
- ".inst 0x4f90e384 // sdot v4.4s, v28.16b, v16.4b[0]\n"
- ".inst 0x4fb0e381 // sdot v1.4s, v28.16b, v16.4b[1]\n"
- ".inst 0x4f90eb80 // sdot v0.4s, v28.16b, v16.4b[2]\n"
- ".inst 0x4fb0eb9e // sdot v30.4s, v28.16b, v16.4b[3]\n"
- "scvtf v4.4s, v4.4s, #0x4\n"
- "scvtf v1.4s, v1.4s, #0x4\n"
- "scvtf v0.4s, v0.4s, #0x4\n"
- "fmla v15.4s, v4.4s, v11.4s\n"
- "scvtf v30.4s, v30.4s, #0x4\n"
- "fmla v19.4s, v1.4s, v23.4s\n"
- "fmla v18.4s, v0.4s, v17.4s\n"
- "fmla v14.4s, v30.4s, v6.4s\n"
- "bgt 7b\n"
- "mov x20, %x[res_ptr]\n"
- "cmp x10, #0x1\n"
- "str q15, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "ble 8f\n"
- "cmp x10, #0x2\n"
- "str q19, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "ble 8f\n"
- "cmp x10, #0x3\n"
- "str q18, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "ble 8f\n"
- "str q14, [x20, #0x0]\n"
- "8:" // Row tail: Accumulator store skip
- "subs x23, x23, #0x4\n"
- "add %x[res_ptr], %x[res_ptr], #0x10\n"
- "bne 6b\n"
- "subs x10, x10, #0x4\n"
- "add %x[a_ptr], %x[a_ptr], x9\n"
- "mov %x[res_ptr], x22\n"
- "bgt 5b\n"
- "9:" // Row tail: Row loop skip
- : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr)
- : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
- );
- #else
- float sumf[4][4];
- int sumi;
- for (int y = 0; y < nr / 4; y++) {
- const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb);
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb);
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0;
- }
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) +
- (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4;
- }
- sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]);
- }
- }
- }
- }
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++)
- s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j];
- }
- }
- }
- #endif
- }
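- // Output indexing sketch for the scalar GEMM fallback above: for row tile y
- // and column tile x, the result for row m and column j of the tile lands at
- // s[(y * 4 + m) * bs + x * ncols_interleaved + j], i.e. bs is the number of
- // floats between consecutive output rows.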
- void ggml_gemm_q4_0_4x8_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) {
- const int qk = QK8_0;
- const int nb = n / qk;
- const int ncols_interleaved = 4;
- const int blocklen = 8;
- assert (n % qk == 0);
- assert (nr % 4 == 0);
- assert (nc % ncols_interleaved == 0);
- UNUSED(s);
- UNUSED(bs);
- UNUSED(vx);
- UNUSED(vy);
- UNUSED(nr);
- UNUSED(nc);
- UNUSED(nb);
- UNUSED(ncols_interleaved);
- UNUSED(blocklen);
- #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8)
- if (ggml_sve_cnt_b == QK8_0) {
- GGML_ASSERT(!(ggml_cpu_has_sve() && (ggml_sve_cnt_b == QK8_0)) &&
- "__ARM_FEATURE_SVE defined, use the Q4_0_8_8 quantization format for optimal performance");
- }
- #endif
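- // The i8mm path below uses smmla instead of sdot: each smmla multiplies a
- // 2x8 int8 tile by an 8x2 int8 tile and accumulates a 2x2 int32 result, which
- // is why the accumulators are later de-interleaved with uzp1/uzp2 before
- // scaling (a sketch of the intent, not a full walkthrough of the assembly).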
- #if defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) && ! ((defined(_MSC_VER)) && ! defined(__clang__))
- const void * b_ptr = vx;
- const void * a_ptr = vy;
- float * res_ptr = s;
- size_t res_stride = bs * sizeof(float);
- __asm__ __volatile__(
- "mov x10, %x[nr]\n"
- "mov x9, #0x88\n"
- "cmp x10, #0x10\n"
- "mul x9, %x[nb], x9\n"
- "blt 4f\n"
- "1:" // Row loop
- "add x28, %x[b_ptr], #0x8\n"
- "mov x27, %x[nc]\n"
- "add x26, %x[res_ptr], %x[res_stride], LSL #4\n"
- "2:" // Column loop
- "add x25, %x[a_ptr], #0x8\n"
- "movi v2.16b, #0x0\n"
- "movi v10.16b, #0x0\n"
- "mov x24, %x[nb]\n"
- "add x23, x25, x9\n"
- "movi v12.16b, #0x0\n"
- "movi v28.16b, #0x0\n"
- "add x22, x23, x9\n"
- "movi v11.16b, #0x0\n"
- "movi v13.16b, #0x0\n"
- "add x21, x22, x9\n"
- "movi v22.16b, #0x0\n"
- "movi v23.16b, #0x0\n"
- "movi v25.16b, #0x0\n"
- "movi v5.16b, #0x0\n"
- "movi v7.16b, #0x0\n"
- "movi v4.16b, #0x0\n"
- "movi v6.16b, #0x0\n"
- "movi v30.16b, #0x0\n"
- "movi v24.16b, #0x0\n"
- "movi v14.16b, #0x0\n"
- "3:" // Block loop
- "ldr q21, [x28, #0x0]\n"
- "ldr q16, [x28, #0x10]\n"
- "movi v1.16b, #0x4\n"
- "movi v19.4s, #0x0\n"
- "ldr q27, [x25, #0x0]\n"
- "ldr q15, [x25, #0x10]\n"
- "movi v26.4s, #0x0\n"
- "movi v18.4s, #0x0\n"
- "ldr q29, [x28, #0x20]\n"
- "ldr q3, [x28, #0x30]\n"
- "movi v17.4s, #0x0\n"
- "movi v0.16b, #0xf0\n"
- "ldr d20, [x25, #-0x8]\n"
- "ldr d9, [x23, #-0x8]\n"
- "sshl v8.16b, v21.16b, v1.16b\n"
- "sshl v31.16b, v16.16b, v1.16b\n"
- "and v21.16b, v21.16b, v0.16b\n"
- "and v16.16b, v16.16b, v0.16b\n"
- "sub x20, x28, #0x8\n"
- "subs x24, x24, #0x1\n"
- "add x28, x28, #0x48\n"
- ".inst 0x4e88a773 // smmla v19.4s, v27.16b, v8.16b\n"
- ".inst 0x4e9fa77a // smmla v26.4s, v27.16b, v31.16b\n"
- "ldr q27, [x25, #0x20]\n"
- ".inst 0x4e88a5f2 // smmla v18.4s, v15.16b, v8.16b\n"
- ".inst 0x4e9fa5f1 // smmla v17.4s, v15.16b, v31.16b\n"
- "sshl v15.16b, v29.16b, v1.16b\n"
- "sshl v1.16b, v3.16b, v1.16b\n"
- "and v29.16b, v29.16b, v0.16b\n"
- "and v3.16b, v3.16b, v0.16b\n"
- "ldr q0, [x25, #0x30]\n"
- "fcvtl v20.4s, v20.4h\n"
- ".inst 0x4e8fa773 // smmla v19.4s, v27.16b, v15.16b\n"
- "fcvtl v9.4s, v9.4h\n"
- ".inst 0x4e81a77a // smmla v26.4s, v27.16b, v1.16b\n"
- "ldr q27, [x25, #0x40]\n"
- ".inst 0x4e8fa412 // smmla v18.4s, v0.16b, v15.16b\n"
- ".inst 0x4e81a411 // smmla v17.4s, v0.16b, v1.16b\n"
- "ldr q0, [x25, #0x50]\n"
- ".inst 0x4e95a773 // smmla v19.4s, v27.16b, v21.16b\n"
- ".inst 0x4e90a77a // smmla v26.4s, v27.16b, v16.16b\n"
- "ldr q27, [x25, #0x60]\n"
- ".inst 0x4e95a412 // smmla v18.4s, v0.16b, v21.16b\n"
- ".inst 0x4e90a411 // smmla v17.4s, v0.16b, v16.16b\n"
- "ldr q0, [x25, #0x70]\n"
- "add x25, x25, #0x88\n"
- ".inst 0x4e9da773 // smmla v19.4s, v27.16b, v29.16b\n"
- ".inst 0x4e83a77a // smmla v26.4s, v27.16b, v3.16b\n"
- "ldr d27, [x20, #0x0]\n"
- ".inst 0x4e9da412 // smmla v18.4s, v0.16b, v29.16b\n"
- ".inst 0x4e83a411 // smmla v17.4s, v0.16b, v3.16b\n"
- "fcvtl v27.4s, v27.4h\n"
- "uzp1 v0.2d, v19.2d, v26.2d\n"
- "uzp2 v26.2d, v19.2d, v26.2d\n"
- "fmul v19.4s, v27.4s, v20.s[0]\n"
- "scvtf v0.4s, v0.4s, #0x4\n"
- "scvtf v26.4s, v26.4s, #0x4\n"
- "fmla v2.4s, v0.4s, v19.4s\n"
- "ldr q19, [x23, #0x0]\n"
- "uzp1 v0.2d, v18.2d, v17.2d\n"
- "uzp2 v18.2d, v18.2d, v17.2d\n"
- "fmul v17.4s, v27.4s, v20.s[1]\n"
- "scvtf v0.4s, v0.4s, #0x4\n"
- "scvtf v18.4s, v18.4s, #0x4\n"
- "fmla v10.4s, v26.4s, v17.4s\n"
- "ldr q17, [x23, #0x10]\n"
- "fmul v26.4s, v27.4s, v20.s[2]\n"
- "fmul v20.4s, v27.4s, v20.s[3]\n"
- "fmla v12.4s, v0.4s, v26.4s\n"
- "ldr d0, [x22, #-0x8]\n"
- "ldr d26, [x21, #-0x8]\n"
- "fcvtl v0.4s, v0.4h\n"
- "fmla v28.4s, v18.4s, v20.4s\n"
- "movi v20.4s, #0x0\n"
- "movi v18.4s, #0x0\n"
- ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n"
- ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n"
- "ldr q19, [x23, #0x20]\n"
- "fcvtl v26.4s, v26.4h\n"
- ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n"
- ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n"
- "ldr q19, [x23, #0x40]\n"
- ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n"
- ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n"
- "ldr q19, [x23, #0x60]\n"
- ".inst 0x4e9da674 // smmla v20.4s, v19.16b, v29.16b\n"
- ".inst 0x4e83a672 // smmla v18.4s, v19.16b, v3.16b\n"
- "uzp1 v19.2d, v20.2d, v18.2d\n"
- "scvtf v19.4s, v19.4s, #0x4\n"
- "uzp2 v20.2d, v20.2d, v18.2d\n"
- "fmul v18.4s, v27.4s, v9.s[0]\n"
- "scvtf v20.4s, v20.4s, #0x4\n"
- "fmla v11.4s, v19.4s, v18.4s\n"
- "ldr q18, [x22, #0x0]\n"
- "fmul v19.4s, v27.4s, v9.s[1]\n"
- "fmla v13.4s, v20.4s, v19.4s\n"
- "movi v19.4s, #0x0\n"
- "movi v20.4s, #0x0\n"
- ".inst 0x4e88a633 // smmla v19.4s, v17.16b, v8.16b\n"
- ".inst 0x4e9fa634 // smmla v20.4s, v17.16b, v31.16b\n"
- "ldr q17, [x23, #0x30]\n"
- ".inst 0x4e8fa633 // smmla v19.4s, v17.16b, v15.16b\n"
- ".inst 0x4e81a634 // smmla v20.4s, v17.16b, v1.16b\n"
- "ldr q17, [x23, #0x50]\n"
- ".inst 0x4e95a633 // smmla v19.4s, v17.16b, v21.16b\n"
- ".inst 0x4e90a634 // smmla v20.4s, v17.16b, v16.16b\n"
- "ldr q17, [x23, #0x70]\n"
- "add x23, x23, #0x88\n"
- ".inst 0x4e9da633 // smmla v19.4s, v17.16b, v29.16b\n"
- ".inst 0x4e83a634 // smmla v20.4s, v17.16b, v3.16b\n"
- "uzp1 v17.2d, v19.2d, v20.2d\n"
- "scvtf v17.4s, v17.4s, #0x4\n"
- "uzp2 v20.2d, v19.2d, v20.2d\n"
- "fmul v19.4s, v27.4s, v9.s[2]\n"
- "fmul v9.4s, v27.4s, v9.s[3]\n"
- "scvtf v20.4s, v20.4s, #0x4\n"
- "fmla v22.4s, v17.4s, v19.4s\n"
- "ldr q17, [x22, #0x10]\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x4e88a653 // smmla v19.4s, v18.16b, v8.16b\n"
- "fmla v23.4s, v20.4s, v9.4s\n"
- "movi v20.4s, #0x0\n"
- "movi v9.4s, #0x0\n"
- ".inst 0x4e9fa654 // smmla v20.4s, v18.16b, v31.16b\n"
- "ldr q18, [x22, #0x20]\n"
- ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n"
- ".inst 0x4e8fa653 // smmla v19.4s, v18.16b, v15.16b\n"
- ".inst 0x4e81a654 // smmla v20.4s, v18.16b, v1.16b\n"
- "ldr q18, [x22, #0x40]\n"
- ".inst 0x4e95a653 // smmla v19.4s, v18.16b, v21.16b\n"
- ".inst 0x4e90a654 // smmla v20.4s, v18.16b, v16.16b\n"
- "ldr q18, [x22, #0x60]\n"
- ".inst 0x4e9da653 // smmla v19.4s, v18.16b, v29.16b\n"
- ".inst 0x4e83a654 // smmla v20.4s, v18.16b, v3.16b\n"
- "movi v18.4s, #0x0\n"
- ".inst 0x4e9fa632 // smmla v18.4s, v17.16b, v31.16b\n"
- "ldr q17, [x22, #0x30]\n"
- ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n"
- ".inst 0x4e81a632 // smmla v18.4s, v17.16b, v1.16b\n"
- "ldr q17, [x22, #0x50]\n"
- ".inst 0x4e95a629 // smmla v9.4s, v17.16b, v21.16b\n"
- ".inst 0x4e90a632 // smmla v18.4s, v17.16b, v16.16b\n"
- "ldr q17, [x22, #0x70]\n"
- "add x22, x22, #0x88\n"
- ".inst 0x4e9da629 // smmla v9.4s, v17.16b, v29.16b\n"
- ".inst 0x4e83a632 // smmla v18.4s, v17.16b, v3.16b\n"
- "uzp1 v17.2d, v19.2d, v20.2d\n"
- "uzp2 v20.2d, v19.2d, v20.2d\n"
- "fmul v19.4s, v27.4s, v0.s[0]\n"
- "scvtf v17.4s, v17.4s, #0x4\n"
- "scvtf v20.4s, v20.4s, #0x4\n"
- "fmla v25.4s, v17.4s, v19.4s\n"
- "ldr q19, [x21, #0x0]\n"
- "fmul v17.4s, v27.4s, v0.s[1]\n"
- "fmla v5.4s, v20.4s, v17.4s\n"
- "ldr q17, [x21, #0x10]\n"
- "uzp1 v20.2d, v9.2d, v18.2d\n"
- "uzp2 v9.2d, v9.2d, v18.2d\n"
- "fmul v18.4s, v27.4s, v0.s[2]\n"
- "fmul v0.4s, v27.4s, v0.s[3]\n"
- "scvtf v20.4s, v20.4s, #0x4\n"
- "scvtf v9.4s, v9.4s, #0x4\n"
- "fmla v7.4s, v20.4s, v18.4s\n"
- "movi v20.4s, #0x0\n"
- "movi v18.4s, #0x0\n"
- ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n"
- ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n"
- "ldr q19, [x21, #0x20]\n"
- "fmla v4.4s, v9.4s, v0.4s\n"
- "movi v9.4s, #0x0\n"
- "movi v0.4s, #0x0\n"
- ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n"
- "fmul v8.4s, v27.4s, v26.s[0]\n"
- ".inst 0x4e9fa620 // smmla v0.4s, v17.16b, v31.16b\n"
- "ldr q17, [x21, #0x30]\n"
- ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n"
- "fmul v31.4s, v27.4s, v26.s[1]\n"
- ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n"
- "ldr q19, [x21, #0x40]\n"
- ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n"
- "fmul v15.4s, v27.4s, v26.s[2]\n"
- "fmul v27.4s, v27.4s, v26.s[3]\n"
- ".inst 0x4e81a620 // smmla v0.4s, v17.16b, v1.16b\n"
- "ldr q1, [x21, #0x50]\n"
- ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n"
- ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n"
- "ldr q26, [x21, #0x60]\n"
- ".inst 0x4e95a429 // smmla v9.4s, v1.16b, v21.16b\n"
- ".inst 0x4e90a420 // smmla v0.4s, v1.16b, v16.16b\n"
- "ldr q21, [x21, #0x70]\n"
- "add x21, x21, #0x88\n"
- ".inst 0x4e9da754 // smmla v20.4s, v26.16b, v29.16b\n"
- ".inst 0x4e83a752 // smmla v18.4s, v26.16b, v3.16b\n"
- ".inst 0x4e9da6a9 // smmla v9.4s, v21.16b, v29.16b\n"
- ".inst 0x4e83a6a0 // smmla v0.4s, v21.16b, v3.16b\n"
- "uzp1 v29.2d, v20.2d, v18.2d\n"
- "uzp2 v21.2d, v20.2d, v18.2d\n"
- "scvtf v29.4s, v29.4s, #0x4\n"
- "uzp1 v18.2d, v9.2d, v0.2d\n"
- "uzp2 v16.2d, v9.2d, v0.2d\n"
- "scvtf v21.4s, v21.4s, #0x4\n"
- "fmla v6.4s, v29.4s, v8.4s\n"
- "scvtf v18.4s, v18.4s, #0x4\n"
- "scvtf v16.4s, v16.4s, #0x4\n"
- "fmla v30.4s, v21.4s, v31.4s\n"
- "fmla v24.4s, v18.4s, v15.4s\n"
- "fmla v14.4s, v16.4s, v27.4s\n"
- "bgt 3b\n"
- "mov x20, %x[res_ptr]\n"
- "subs x27, x27, #0x4\n"
- "add %x[res_ptr], %x[res_ptr], #0x10\n"
- "str q2, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q10, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q12, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q28, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q11, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q13, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q22, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q23, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q25, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q5, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q7, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q4, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q6, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q30, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q24, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "str q14, [x20, #0x0]\n"
- "bne 2b\n"
- "mov x20, #0x4\n"
- "sub x10, x10, #0x10\n"
- "cmp x10, #0x10\n"
- "mov %x[res_ptr], x26\n"
- "madd %x[a_ptr], x20, x9, %x[a_ptr]\n"
- "bge 1b\n"
- "4:" // Row loop skip
- "cbz x10, 9f\n"
- "5:" // Row tail: Row loop
- "add x24, %x[b_ptr], #0x8\n"
- "mov x23, %x[nc]\n"
- "add x22, %x[res_ptr], %x[res_stride], LSL #2\n"
- "6:" // Row tail: Column loop
- "movi v2.16b, #0x0\n"
- "movi v10.16b, #0x0\n"
- "add x25, %x[a_ptr], #0x8\n"
- "mov x21, %x[nb]\n"
- "movi v12.16b, #0x0\n"
- "movi v28.16b, #0x0\n"
- "7:" // Row tail: Block loop
- "ldr q6, [x24, #0x0]\n"
- "ldr q5, [x24, #0x10]\n"
- "movi v17.16b, #0x4\n"
- "movi v8.4s, #0x0\n"
- "ldr q4, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "movi v27.4s, #0x0\n"
- "movi v0.4s, #0x0\n"
- "ldr q31, [x24, #0x20]\n"
- "ldr q14, [x24, #0x30]\n"
- "movi v29.4s, #0x0\n"
- "movi v22.16b, #0xf0\n"
- "ldr q11, [x25, #0x20]\n"
- "ldr q23, [x25, #0x30]\n"
- "sshl v21.16b, v6.16b, v17.16b\n"
- "sshl v16.16b, v5.16b, v17.16b\n"
- "ldr q20, [x25, #0x40]\n"
- "ldr q26, [x25, #0x50]\n"
- "and v6.16b, v6.16b, v22.16b\n"
- "and v5.16b, v5.16b, v22.16b\n"
- "ldr q25, [x25, #0x60]\n"
- "ldr q3, [x25, #0x70]\n"
- "sshl v19.16b, v31.16b, v17.16b\n"
- "sshl v18.16b, v14.16b, v17.16b\n"
- "ldr d17, [x25, #-0x8]\n"
- ".inst 0x4e95a488 // smmla v8.4s, v4.16b, v21.16b\n"
- ".inst 0x4e90a49b // smmla v27.4s, v4.16b, v16.16b\n"
- "and v31.16b, v31.16b, v22.16b\n"
- ".inst 0x4e95a5a0 // smmla v0.4s, v13.16b, v21.16b\n"
- ".inst 0x4e90a5bd // smmla v29.4s, v13.16b, v16.16b\n"
- "and v14.16b, v14.16b, v22.16b\n"
- "sub x20, x24, #0x8\n"
- "ldr d16, [x20, #0x0]\n"
- "subs x21, x21, #0x1\n"
- "add x25, x25, #0x88\n"
- "fcvtl v17.4s, v17.4h\n"
- "add x24, x24, #0x48\n"
- ".inst 0x4e93a568 // smmla v8.4s, v11.16b, v19.16b\n"
- ".inst 0x4e92a57b // smmla v27.4s, v11.16b, v18.16b\n"
- ".inst 0x4e93a6e0 // smmla v0.4s, v23.16b, v19.16b\n"
- ".inst 0x4e92a6fd // smmla v29.4s, v23.16b, v18.16b\n"
- "fcvtl v16.4s, v16.4h\n"
- ".inst 0x4e86a688 // smmla v8.4s, v20.16b, v6.16b\n"
- ".inst 0x4e85a69b // smmla v27.4s, v20.16b, v5.16b\n"
- "fmul v23.4s, v16.4s, v17.s[0]\n"
- "fmul v21.4s, v16.4s, v17.s[1]\n"
- "fmul v1.4s, v16.4s, v17.s[2]\n"
- "fmul v20.4s, v16.4s, v17.s[3]\n"
- ".inst 0x4e86a740 // smmla v0.4s, v26.16b, v6.16b\n"
- ".inst 0x4e85a75d // smmla v29.4s, v26.16b, v5.16b\n"
- ".inst 0x4e9fa728 // smmla v8.4s, v25.16b, v31.16b\n"
- ".inst 0x4e8ea73b // smmla v27.4s, v25.16b, v14.16b\n"
- ".inst 0x4e9fa460 // smmla v0.4s, v3.16b, v31.16b\n"
- ".inst 0x4e8ea47d // smmla v29.4s, v3.16b, v14.16b\n"
- "uzp1 v19.2d, v8.2d, v27.2d\n"
- "uzp2 v18.2d, v8.2d, v27.2d\n"
- "scvtf v19.4s, v19.4s, #0x4\n"
- "uzp1 v17.2d, v0.2d, v29.2d\n"
- "uzp2 v16.2d, v0.2d, v29.2d\n"
- "scvtf v18.4s, v18.4s, #0x4\n"
- "fmla v2.4s, v19.4s, v23.4s\n"
- "scvtf v17.4s, v17.4s, #0x4\n"
- "scvtf v16.4s, v16.4s, #0x4\n"
- "fmla v10.4s, v18.4s, v21.4s\n"
- "fmla v12.4s, v17.4s, v1.4s\n"
- "fmla v28.4s, v16.4s, v20.4s\n"
- "bgt 7b\n"
- "mov x20, %x[res_ptr]\n"
- "cmp x10, #0x1\n"
- "str q2, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "ble 8f\n"
- "cmp x10, #0x2\n"
- "str q10, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "ble 8f\n"
- "cmp x10, #0x3\n"
- "str q12, [x20, #0x0]\n"
- "add x20, x20, %x[res_stride]\n"
- "ble 8f\n"
- "str q28, [x20, #0x0]\n"
- "8:" // Row tail: Accumulator store skip
- "subs x23, x23, #0x4\n"
- "add %x[res_ptr], %x[res_ptr], #0x10\n"
- "bne 6b\n"
- "subs x10, x10, #0x4\n"
- "add %x[a_ptr], %x[a_ptr], x9\n"
- "mov %x[res_ptr], x22\n"
- "bgt 5b\n"
- "9:" // Row tail: Row loop skip
- : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr)
- : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
- );
- #elif defined(__ARM_NEON) && defined(__aarch64__)
- GGML_ASSERT((ggml_cpu_has_sve() || ggml_cpu_has_matmul_int8()) &&
- "__ARM_FEATURE_SVE and __ARM_FEATURE_MATMUL_INT8 not defined, use the Q4_0_4_4 quantization format for optimal "
- "performance");
- #else
- float sumf[4][4];
- int sumi;
- for (int y = 0; y < nr / 4; y++) {
- const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb);
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb);
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0;
- }
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) +
- (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4;
- }
- sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]);
- }
- }
- }
- }
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++)
- s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j];
- }
- }
- }
- #endif
- }
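
In the hand-written NEON epilogue just above, the same divide-by-16 shows up fused into the integer-to-float conversion: "scvtf v.4s, v.4s, #0x4" converts the smmla accumulators as fixed-point values with four fractional bits, i.e. it divides by 16 during the convert, and the following fmul/fmla instructions apply the product of the two per-block FP16 scales. A minimal C sketch of one 4x4 tile's epilogue, assuming the scales have already been widened to FP32 (the asm does that with fcvtl; the helper name is illustrative, not part of the source):

#include <stdint.h>

/* Sketch of the per-block epilogue: sumi holds the raw smmla sums,
 * which carry a spare factor of 16 from the nibble decode; d_a and
 * d_b are the activation and weight block scales, already in FP32. */
static void apply_block_scales(float acc[4][4], const int32_t sumi[4][4],
                               const float d_a[4], const float d_b[4]) {
    for (int m = 0; m < 4; m++) {
        for (int j = 0; j < 4; j++) {
            const float si = (float) sumi[m][j] / 16.0f; /* scvtf #0x4 */
            acc[m][j] += si * d_b[j] * d_a[m];           /* fmul + fmla */
        }
    }
}
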
- void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) {
- const int qk = QK8_0;
- const int nb = n / qk;
- const int ncols_interleaved = 8;
- const int blocklen = 8;
- assert (n % qk == 0);
- assert (nr % 4 == 0);
- assert (nc % ncols_interleaved == 0);
- UNUSED(s);
- UNUSED(bs);
- UNUSED(vx);
- UNUSED(vy);
- UNUSED(nr);
- UNUSED(nc);
- UNUSED(nb);
- UNUSED(ncols_interleaved);
- UNUSED(blocklen);
- #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) && ! ((defined(_MSC_VER)) && ! defined(__clang__))
- if (ggml_sve_cnt_b == QK8_0) {
- const void * b_ptr = vx;
- const void * a_ptr = vy;
- float * res_ptr = s;
- size_t res_stride = bs * sizeof(float);
- __asm__ __volatile__(
- "mov x20, #0x4\n"
- "mov x13, %x[nr]\n"
- "mov z28.s, #-0x4\n"
- "mov x12, #0x88\n"
- "ptrue p1.b\n"
- "whilelt p0.s, XZR, x20\n"
- "cmp x13, #0x10\n"
- "mul x12, %x[nb], x12\n"
- "blt 4f\n"
- "1:" // Row loop
- "add x11, %x[b_ptr], #0x10\n"
- "mov x10, %x[nc]\n"
- "add x9, %x[res_ptr], %x[res_stride], LSL #4\n"
- "2:" // Column loop
- "add x28, %x[a_ptr], #0x8\n"
- "mov z24.b, #0x0\n"
- "mov z15.b, #0x0\n"
- "mov x27, %x[nb]\n"
- "add x26, x28, x12\n"
- "mov z12.b, #0x0\n"
- "mov z0.b, #0x0\n"
- "add x25, x26, x12\n"
- "mov z13.b, #0x0\n"
- "mov z1.b, #0x0\n"
- "add x24, x25, x12\n"
- "mov z20.b, #0x0\n"
- "mov z25.b, #0x0\n"
- "mov z11.b, #0x0\n"
- "mov z16.b, #0x0\n"
- "mov z19.b, #0x0\n"
- "mov z26.b, #0x0\n"
- "mov z8.b, #0x0\n"
- "mov z29.b, #0x0\n"
- "mov z27.b, #0x0\n"
- "mov z10.b, #0x0\n"
- "3:" // Block loop
- "ld1b { z30.b }, p1/Z, [x11]\n"
- "ld1b { z21.b }, p1/Z, [x11, #1, MUL VL]\n"
- "mov z18.s, #0x0\n"
- "mov z7.s, #0x0\n"
- "ld1rqb { z3.b }, p1/Z, [x28]\n"
- "ld1rqb { z5.b }, p1/Z, [x28, #16]\n"
- "mov z9.s, #0x0\n"
- "mov z22.s, #0x0\n"
- "ld1b { z4.b }, p1/Z, [x11, #2, MUL VL]\n"
- "ld1b { z17.b }, p1/Z, [x11, #3, MUL VL]\n"
- "sub x20, x11, #0x10\n"
- "sub x23, x28, #0x8\n"
- "lsl z31.b, z30.b, #0x4\n"
- "lsl z6.b, z21.b, #0x4\n"
- "ld1h { z23.s }, p1/Z, [x20]\n"
- "sub x22, x26, #0x8\n"
- "and z30.b, z30.b, #0xf0\n"
- "and z21.b, z21.b, #0xf0\n"
- "sub x21, x25, #0x8\n"
- "sub x20, x24, #0x8\n"
- "lsl z14.b, z4.b, #0x4\n"
- "lsl z2.b, z17.b, #0x4\n"
- "subs x27, x27, #0x1\n"
- "add x11, x11, #0x90\n"
- ".inst 0x451f9872 // smmla z18.s, z3.b, z31.b\n"
- ".inst 0x45069867 // smmla z7.s, z3.b, z6.b\n"
- "ld1rqb { z3.b }, p1/Z, [x28, #32]\n"
- "and z4.b, z4.b, #0xf0\n"
- ".inst 0x451f98a9 // smmla z9.s, z5.b, z31.b\n"
- ".inst 0x450698b6 // smmla z22.s, z5.b, z6.b\n"
- "ld1rqb { z5.b }, p1/Z, [x28, #48]\n"
- "and z17.b, z17.b, #0xf0\n"
- "fcvt z23.s, p1/m, z23.h\n"
- ".inst 0x450e9872 // smmla z18.s, z3.b, z14.b\n"
- ".inst 0x45029867 // smmla z7.s, z3.b, z2.b\n"
- "ld1rqb { z3.b }, p1/Z, [x28, #64]\n"
- ".inst 0x450e98a9 // smmla z9.s, z5.b, z14.b\n"
- ".inst 0x450298b6 // smmla z22.s, z5.b, z2.b\n"
- "ld1rqb { z5.b }, p1/Z, [x28, #80]\n"
- "fscale z23.s, p1/m, z23.s, z28.s\n"
- ".inst 0x451e9872 // smmla z18.s, z3.b, z30.b\n"
- ".inst 0x45159867 // smmla z7.s, z3.b, z21.b\n"
- "ld1rqb { z3.b }, p1/Z, [x28, #96]\n"
- ".inst 0x451e98a9 // smmla z9.s, z5.b, z30.b\n"
- ".inst 0x451598b6 // smmla z22.s, z5.b, z21.b\n"
- "ld1rqb { z5.b }, p1/Z, [x28, #112]\n"
- "add x28, x28, #0x88\n"
- ".inst 0x45049872 // smmla z18.s, z3.b, z4.b\n"
- ".inst 0x45119867 // smmla z7.s, z3.b, z17.b\n"
- "ld1h { z3.s }, p0/Z, [x23]\n"
- ".inst 0x450498a9 // smmla z9.s, z5.b, z4.b\n"
- ".inst 0x451198b6 // smmla z22.s, z5.b, z17.b\n"
- "fcvt z3.s, p1/m, z3.h\n"
- "uzp1 z5.d, z18.d, z7.d\n"
- "uzp2 z18.d, z18.d, z7.d\n"
- "mov z3.q, z3.q[0]\n"
- "uzp1 z7.d, z9.d, z22.d\n"
- "uzp2 z22.d, z9.d, z22.d\n"
- "fmul z9.s, z23.s, z3.s[0]\n"
- "scvtf z5.s, p1/m, z5.s\n"
- "scvtf z18.s, p1/m, z18.s\n"
- "scvtf z7.s, p1/m, z7.s\n"
- "scvtf z22.s, p1/m, z22.s\n"
- "fmla z24.s, p1/M, z5.s, z9.s\n"
- "ld1rqb { z5.b }, p1/Z, [x26]\n"
- "fmul z9.s, z23.s, z3.s[1]\n"
- "fmla z15.s, p1/M, z18.s, z9.s\n"
- "ld1rqb { z18.b }, p1/Z, [x26, #16]\n"
- "fmul z9.s, z23.s, z3.s[2]\n"
- "fmul z3.s, z23.s, z3.s[3]\n"
- "fmla z12.s, p1/M, z7.s, z9.s\n"
- "mov z9.s, #0x0\n"
- "ld1h { z7.s }, p0/Z, [x22]\n"
- ".inst 0x451f98a9 // smmla z9.s, z5.b, z31.b\n"
- "fmla z0.s, p1/M, z22.s, z3.s\n"
- "mov z22.s, #0x0\n"
- "ld1h { z3.s }, p0/Z, [x21]\n"
- ".inst 0x450698b6 // smmla z22.s, z5.b, z6.b\n"
- "ld1rqb { z5.b }, p1/Z, [x26, #32]\n"
- "fcvt z7.s, p1/m, z7.h\n"
- "fcvt z3.s, p1/m, z3.h\n"
- ".inst 0x450e98a9 // smmla z9.s, z5.b, z14.b\n"
- ".inst 0x450298b6 // smmla z22.s, z5.b, z2.b\n"
- "ld1rqb { z5.b }, p1/Z, [x26, #64]\n"
- "mov z7.q, z7.q[0]\n"
- "mov z3.q, z3.q[0]\n"
- ".inst 0x451e98a9 // smmla z9.s, z5.b, z30.b\n"
- ".inst 0x451598b6 // smmla z22.s, z5.b, z21.b\n"
- "ld1rqb { z5.b }, p1/Z, [x26, #96]\n"
- ".inst 0x450498a9 // smmla z9.s, z5.b, z4.b\n"
- ".inst 0x451198b6 // smmla z22.s, z5.b, z17.b\n"
- "uzp1 z5.d, z9.d, z22.d\n"
- "scvtf z5.s, p1/m, z5.s\n"
- "uzp2 z22.d, z9.d, z22.d\n"
- "fmul z9.s, z23.s, z7.s[0]\n"
- "scvtf z22.s, p1/m, z22.s\n"
- "fmla z13.s, p1/M, z5.s, z9.s\n"
- "ld1rqb { z9.b }, p1/Z, [x25]\n"
- "fmul z5.s, z23.s, z7.s[1]\n"
- "fmla z1.s, p1/M, z22.s, z5.s\n"
- "mov z5.s, #0x0\n"
- "mov z22.s, #0x0\n"
- ".inst 0x451f9a45 // smmla z5.s, z18.b, z31.b\n"
- ".inst 0x45069a56 // smmla z22.s, z18.b, z6.b\n"
- "ld1rqb { z18.b }, p1/Z, [x26, #48]\n"
- ".inst 0x450e9a45 // smmla z5.s, z18.b, z14.b\n"
- ".inst 0x45029a56 // smmla z22.s, z18.b, z2.b\n"
- "ld1rqb { z18.b }, p1/Z, [x26, #80]\n"
- ".inst 0x451e9a45 // smmla z5.s, z18.b, z30.b\n"
- ".inst 0x45159a56 // smmla z22.s, z18.b, z21.b\n"
- "ld1rqb { z18.b }, p1/Z, [x26, #112]\n"
- "add x26, x26, #0x88\n"
- ".inst 0x45049a45 // smmla z5.s, z18.b, z4.b\n"
- ".inst 0x45119a56 // smmla z22.s, z18.b, z17.b\n"
- "uzp1 z18.d, z5.d, z22.d\n"
- "scvtf z18.s, p1/m, z18.s\n"
- "uzp2 z22.d, z5.d, z22.d\n"
- "fmul z5.s, z23.s, z7.s[2]\n"
- "fmul z7.s, z23.s, z7.s[3]\n"
- "scvtf z22.s, p1/m, z22.s\n"
- "fmla z20.s, p1/M, z18.s, z5.s\n"
- "ld1rqb { z18.b }, p1/Z, [x25, #16]\n"
- "ld1h { z5.s }, p0/Z, [x20]\n"
- "fcvt z5.s, p1/m, z5.h\n"
- "fmla z25.s, p1/M, z22.s, z7.s\n"
- "mov z22.s, #0x0\n"
- "mov z7.s, #0x0\n"
- ".inst 0x451f9936 // smmla z22.s, z9.b, z31.b\n"
- ".inst 0x45069927 // smmla z7.s, z9.b, z6.b\n"
- "ld1rqb { z9.b }, p1/Z, [x25, #32]\n"
- "mov z5.q, z5.q[0]\n"
- ".inst 0x450e9936 // smmla z22.s, z9.b, z14.b\n"
- ".inst 0x45029927 // smmla z7.s, z9.b, z2.b\n"
- "ld1rqb { z9.b }, p1/Z, [x25, #64]\n"
- ".inst 0x451e9936 // smmla z22.s, z9.b, z30.b\n"
- ".inst 0x45159927 // smmla z7.s, z9.b, z21.b\n"
- "ld1rqb { z9.b }, p1/Z, [x25, #96]\n"
- ".inst 0x45049936 // smmla z22.s, z9.b, z4.b\n"
- ".inst 0x45119927 // smmla z7.s, z9.b, z17.b\n"
- "uzp1 z9.d, z22.d, z7.d\n"
- "scvtf z9.s, p1/m, z9.s\n"
- "uzp2 z22.d, z22.d, z7.d\n"
- "fmul z7.s, z23.s, z3.s[0]\n"
- "scvtf z22.s, p1/m, z22.s\n"
- "fmla z11.s, p1/M, z9.s, z7.s\n"
- "ld1rqb { z9.b }, p1/Z, [x24]\n"
- "fmul z7.s, z23.s, z3.s[1]\n"
- "fmla z16.s, p1/M, z22.s, z7.s\n"
- "mov z22.s, #0x0\n"
- "mov z7.s, #0x0\n"
- ".inst 0x451f9a56 // smmla z22.s, z18.b, z31.b\n"
- ".inst 0x45069a47 // smmla z7.s, z18.b, z6.b\n"
- "ld1rqb { z18.b }, p1/Z, [x25, #48]\n"
- ".inst 0x450e9a56 // smmla z22.s, z18.b, z14.b\n"
- ".inst 0x45029a47 // smmla z7.s, z18.b, z2.b\n"
- "ld1rqb { z18.b }, p1/Z, [x25, #80]\n"
- ".inst 0x451e9a56 // smmla z22.s, z18.b, z30.b\n"
- ".inst 0x45159a47 // smmla z7.s, z18.b, z21.b\n"
- "ld1rqb { z18.b }, p1/Z, [x25, #112]\n"
- "add x25, x25, #0x88\n"
- ".inst 0x45049a56 // smmla z22.s, z18.b, z4.b\n"
- ".inst 0x45119a47 // smmla z7.s, z18.b, z17.b\n"
- "uzp1 z18.d, z22.d, z7.d\n"
- "scvtf z18.s, p1/m, z18.s\n"
- "uzp2 z7.d, z22.d, z7.d\n"
- "fmul z22.s, z23.s, z3.s[2]\n"
- "fmul z3.s, z23.s, z3.s[3]\n"
- "scvtf z7.s, p1/m, z7.s\n"
- "fmla z19.s, p1/M, z18.s, z22.s\n"
- "ld1rqb { z18.b }, p1/Z, [x24, #16]\n"
- "fmul z22.s, z23.s, z5.s[0]\n"
- "fmla z26.s, p1/M, z7.s, z3.s\n"
- "mov z3.s, #0x0\n"
- "mov z7.s, #0x0\n"
- ".inst 0x451f9923 // smmla z3.s, z9.b, z31.b\n"
- ".inst 0x45069927 // smmla z7.s, z9.b, z6.b\n"
- "ld1rqb { z9.b }, p1/Z, [x24, #32]\n"
- ".inst 0x450e9923 // smmla z3.s, z9.b, z14.b\n"
- ".inst 0x45029927 // smmla z7.s, z9.b, z2.b\n"
- "mov z9.s, #0x0\n"
- ".inst 0x451f9a49 // smmla z9.s, z18.b, z31.b\n"
- "mov z31.s, #0x0\n"
- ".inst 0x45069a5f // smmla z31.s, z18.b, z6.b\n"
- "ld1rqb { z6.b }, p1/Z, [x24, #48]\n"
- "ld1rqb { z18.b }, p1/Z, [x24, #64]\n"
- ".inst 0x450e98c9 // smmla z9.s, z6.b, z14.b\n"
- "fmul z14.s, z23.s, z5.s[1]\n"
- ".inst 0x450298df // smmla z31.s, z6.b, z2.b\n"
- "ld1rqb { z6.b }, p1/Z, [x24, #80]\n"
- "fmul z2.s, z23.s, z5.s[2]\n"
- "fmul z23.s, z23.s, z5.s[3]\n"
- ".inst 0x451e9a43 // smmla z3.s, z18.b, z30.b\n"
- ".inst 0x45159a47 // smmla z7.s, z18.b, z21.b\n"
- "ld1rqb { z5.b }, p1/Z, [x24, #96]\n"
- ".inst 0x451e98c9 // smmla z9.s, z6.b, z30.b\n"
- ".inst 0x451598df // smmla z31.s, z6.b, z21.b\n"
- "ld1rqb { z18.b }, p1/Z, [x24, #112]\n"
- "add x24, x24, #0x88\n"
- ".inst 0x450498a3 // smmla z3.s, z5.b, z4.b\n"
- ".inst 0x451198a7 // smmla z7.s, z5.b, z17.b\n"
- ".inst 0x45049a49 // smmla z9.s, z18.b, z4.b\n"
- ".inst 0x45119a5f // smmla z31.s, z18.b, z17.b\n"
- "uzp1 z18.d, z3.d, z7.d\n"
- "uzp2 z5.d, z3.d, z7.d\n"
- "scvtf z18.s, p1/m, z18.s\n"
- "uzp1 z6.d, z9.d, z31.d\n"
- "uzp2 z9.d, z9.d, z31.d\n"
- "scvtf z5.s, p1/m, z5.s\n"
- "fmla z8.s, p1/M, z18.s, z22.s\n"
- "scvtf z6.s, p1/m, z6.s\n"
- "scvtf z9.s, p1/m, z9.s\n"
- "fmla z29.s, p1/M, z5.s, z14.s\n"
- "fmla z27.s, p1/M, z6.s, z2.s\n"
- "fmla z10.s, p1/M, z9.s, z23.s\n"
- "bgt 3b\n"
- "mov x20, %x[res_ptr]\n"
- "subs x10, x10, #0x8\n"
- "add %x[res_ptr], %x[res_ptr], #0x20\n"
- "st1w { z24.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "st1w { z15.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "st1w { z12.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "st1w { z0.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "st1w { z13.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "st1w { z1.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "st1w { z20.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "st1w { z25.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "st1w { z11.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "st1w { z16.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "st1w { z19.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "st1w { z26.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "st1w { z8.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "st1w { z29.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "st1w { z27.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "st1w { z10.s }, p1, [x20]\n"
- "bne 2b\n"
- "mov x20, #0x4\n"
- "sub x13, x13, #0x10\n"
- "cmp x13, #0x10\n"
- "mov %x[res_ptr], x9\n"
- "madd %x[a_ptr], x20, x12, %x[a_ptr]\n"
- "bge 1b\n"
- "4:" // Row loop skip
- "cbz x13, 9f\n"
- "5:" // Row tail: Row loop
- "add x25, %x[b_ptr], #0x10\n"
- "mov x24, %x[nc]\n"
- "add x23, %x[res_ptr], %x[res_stride], LSL #2\n"
- "6:" // Row tail: Column loop
- "mov z24.b, #0x0\n"
- "mov z15.b, #0x0\n"
- "add x28, %x[a_ptr], #0x8\n"
- "mov x22, %x[nb]\n"
- "mov z12.b, #0x0\n"
- "mov z0.b, #0x0\n"
- "7:" // Row tail: Block loop
- "ld1b { z3.b }, p1/Z, [x25]\n"
- "ld1b { z6.b }, p1/Z, [x25, #1, MUL VL]\n"
- "mov z2.s, #0x0\n"
- "mov z25.s, #0x0\n"
- "ld1rqb { z26.b }, p1/Z, [x28]\n"
- "ld1rqb { z21.b }, p1/Z, [x28, #16]\n"
- "mov z27.s, #0x0\n"
- "mov z19.s, #0x0\n"
- "ld1b { z29.b }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1b { z16.b }, p1/Z, [x25, #3, MUL VL]\n"
- "sub x21, x25, #0x10\n"
- "sub x20, x28, #0x8\n"
- "lsl z20.b, z3.b, #0x4\n"
- "lsl z4.b, z6.b, #0x4\n"
- "ld1rqb { z10.b }, p1/Z, [x28, #32]\n"
- "ld1rqb { z23.b }, p1/Z, [x28, #48]\n"
- "and z3.b, z3.b, #0xf0\n"
- "and z6.b, z6.b, #0xf0\n"
- "ld1rqb { z11.b }, p1/Z, [x28, #64]\n"
- "ld1rqb { z7.b }, p1/Z, [x28, #80]\n"
- "lsl z8.b, z29.b, #0x4\n"
- "lsl z14.b, z16.b, #0x4\n"
- "ld1rqb { z18.b }, p1/Z, [x28, #96]\n"
- "ld1rqb { z30.b }, p1/Z, [x28, #112]\n"
- ".inst 0x45149b42 // smmla z2.s, z26.b, z20.b\n"
- ".inst 0x45049b59 // smmla z25.s, z26.b, z4.b\n"
- "and z29.b, z29.b, #0xf0\n"
- "ld1h { z17.s }, p1/Z, [x21]\n"
- ".inst 0x45149abb // smmla z27.s, z21.b, z20.b\n"
- ".inst 0x45049ab3 // smmla z19.s, z21.b, z4.b\n"
- "and z16.b, z16.b, #0xf0\n"
- "ld1h { z4.s }, p0/Z, [x20]\n"
- "subs x22, x22, #0x1\n"
- "add x28, x28, #0x88\n"
- "fcvt z17.s, p1/m, z17.h\n"
- "add x25, x25, #0x90\n"
- ".inst 0x45089942 // smmla z2.s, z10.b, z8.b\n"
- ".inst 0x450e9959 // smmla z25.s, z10.b, z14.b\n"
- "fcvt z4.s, p1/m, z4.h\n"
- ".inst 0x45089afb // smmla z27.s, z23.b, z8.b\n"
- ".inst 0x450e9af3 // smmla z19.s, z23.b, z14.b\n"
- "fscale z17.s, p1/m, z17.s, z28.s\n"
- "mov z4.q, z4.q[0]\n"
- ".inst 0x45039962 // smmla z2.s, z11.b, z3.b\n"
- ".inst 0x45069979 // smmla z25.s, z11.b, z6.b\n"
- "fmul z23.s, z17.s, z4.s[0]\n"
- "fmul z9.s, z17.s, z4.s[1]\n"
- "fmul z21.s, z17.s, z4.s[2]\n"
- "fmul z4.s, z17.s, z4.s[3]\n"
- ".inst 0x450398fb // smmla z27.s, z7.b, z3.b\n"
- ".inst 0x450698f3 // smmla z19.s, z7.b, z6.b\n"
- ".inst 0x451d9a42 // smmla z2.s, z18.b, z29.b\n"
- ".inst 0x45109a59 // smmla z25.s, z18.b, z16.b\n"
- ".inst 0x451d9bdb // smmla z27.s, z30.b, z29.b\n"
- ".inst 0x45109bd3 // smmla z19.s, z30.b, z16.b\n"
- "uzp1 z31.d, z2.d, z25.d\n"
- "uzp2 z13.d, z2.d, z25.d\n"
- "scvtf z31.s, p1/m, z31.s\n"
- "uzp1 z17.d, z27.d, z19.d\n"
- "uzp2 z18.d, z27.d, z19.d\n"
- "scvtf z13.s, p1/m, z13.s\n"
- "fmla z24.s, p1/M, z31.s, z23.s\n"
- "scvtf z17.s, p1/m, z17.s\n"
- "scvtf z18.s, p1/m, z18.s\n"
- "fmla z15.s, p1/M, z13.s, z9.s\n"
- "fmla z12.s, p1/M, z17.s, z21.s\n"
- "fmla z0.s, p1/M, z18.s, z4.s\n"
- "bgt 7b\n"
- "mov x20, %x[res_ptr]\n"
- "cmp x13, #0x1\n"
- "st1w { z24.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "ble 8f\n"
- "cmp x13, #0x2\n"
- "st1w { z15.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "ble 8f\n"
- "cmp x13, #0x3\n"
- "st1w { z12.s }, p1, [x20]\n"
- "add x20, x20, %x[res_stride]\n"
- "ble 8f\n"
- "st1w { z0.s }, p1, [x20]\n"
- "8:" // Row tail: Accumulator store skip
- "subs x24, x24, #0x8\n"
- "add %x[res_ptr], %x[res_ptr], #0x20\n"
- "bne 6b\n"
- "subs x13, x13, #0x4\n"
- "add %x[a_ptr], %x[a_ptr], x12\n"
- "mov %x[res_ptr], x23\n"
- "bgt 5b\n"
- "9:" // Row tail: Row loop skip
- : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr)
- : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc)
- : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
- );
- return;
- }
- else if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) {
- GGML_ASSERT((ggml_cpu_has_sve() && (ggml_sve_cnt_b == QK8_0)) &&
- "__ARM_FEATURE_SVE for vector size of 256-bits not defined, use the Q4_0_4_8 quantization format for optimal "
- "performance");
- }
- else if (ggml_cpu_has_neon()) {
- GGML_ASSERT(((ggml_cpu_has_sve() && (ggml_sve_cnt_b == QK8_0)) || ggml_cpu_has_matmul_int8()) &&
- "__ARM_FEATURE_SVE for vector size of 256-bits and __ARM_FEATURE_MATMUL_INT8 not defined, use the Q4_0_4_4 "
- "quantization format for optimal performance");
- }
- #endif
- #if defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
- GGML_ASSERT(ggml_cpu_has_sve() &&
- "__ARM_FEATURE_SVE not defined, use the Q4_0_4_8 quantization format for optimal performance");
- #elif defined(__ARM_NEON) && defined(__aarch64__)
- GGML_ASSERT((ggml_cpu_has_sve() || ggml_cpu_has_matmul_int8()) &&
- "__ARM_FEATURE_SVE and __ARM_FEATURE_MATMUL_INT8 not defined, use the Q4_0_4_4 quantization format for optimal "
- "performance");
- #else
- float sumf[4][8];
- int sumi;
- for (int y = 0; y < nr / 4; y++) {
- const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb);
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb);
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0;
- }
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) +
- (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4;
- }
- sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]);
- }
- }
- }
- }
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++)
- s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j];
- }
- }
- }
- #endif
- }
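
Every GGML_ASSERT ladder in these kernels encodes the same advice: repack the weights into the interleaved layout that matches the widest integer matmul the CPU actually offers, rather than running a mismatched kernel. A hedged summary of that decision as a hypothetical helper (not present in the source; QK8_0 is 32 bytes, so ggml_sve_cnt_b == QK8_0 corresponds to a 256-bit SVE implementation):

/* Hypothetical summary of the format advice printed by the asserts. */
static const char * q4_0_best_format(int has_sve_256, int has_i8mm, int has_neon) {
    if (has_sve_256) return "Q4_0_8_8"; /* 256-bit SVE smmla kernel   */
    if (has_i8mm)    return "Q4_0_4_8"; /* NEON + FEAT_I8MM kernel    */
    if (has_neon)    return "Q4_0_4_4"; /* plain NEON kernel          */
    return "Q4_0";                      /* scalar fallback, no repack */
}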