/**
 * llama.cpp - commit 40c6d79fb52f995f47507fedfeaae2ac05d9b35c - do not edit this file
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
// NOTE: This is modified from clip.cpp only for LLaVA,
// so there might still be unnecessary artifacts hanging around.
// I'll gradually clean and extend it.
// Note: even with identical normalized image inputs (see normalize_image_u8_to_f32()), the resulting
// embeddings still differ significantly from the PyTorch implementation.
#include "clip.h"
#include "ggml.h"
#include "ggml-cpu.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"
#endif

#ifdef GGML_USE_SYCL
#include "ggml-sycl.h"
#endif

#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif

#ifdef GGML_USE_CANN
#include "ggml-cann.h"
#endif

#ifdef GGML_USE_VULKAN
#include "ggml-vulkan.h"
#endif

#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#include <cassert>
#include <cinttypes>
#include <climits>  // INT_MAX, used by format()
#include <cmath>
#include <cstdarg>  // va_list / va_start / va_copy, used by format()
#include <cstdio>   // fprintf / vsnprintf, used by the LOG_* macros and format()
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <limits>
#include <map>
#include <regex>
#include <sstream>
#include <stdexcept>
#include <vector>
#if defined(LLAVA_LOG_OFF)
#   define LOG_INF(...)
#   define LOG_WRN(...)
#   define LOG_ERR(...)
#   define LOG_DBG(...)
#else // defined(LLAVA_LOG_OFF)
#   define LOG_INF(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
#   define LOG_WRN(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
#   define LOG_ERR(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
#   define LOG_DBG(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
#endif // defined(LLAVA_LOG_OFF)

#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#if __GLIBCXX__
#include <cstdio>
#include <ext/stdio_filebuf.h>
#include <fcntl.h>
#endif
#endif

//#define CLIP_DEBUG_FUNCTIONS

// RGB uint8 image
struct clip_image_u8 {
    int nx;
    int ny;

    std::vector<uint8_t> buf;
};

// RGB float32 image (NHWC)
// Memory layout: RGBRGBRGB...
struct clip_image_f32 {
    int nx;
    int ny;

    std::vector<float> buf;
};

static std::string format(const char * fmt, ...) {
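    // two-pass vsnprintf: the first call (with a NULL buffer) computes the required length,
    // the second writes into a buffer sized to exactly that length plus the terminating NUL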
    va_list ap;
    va_list ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    int size = vsnprintf(NULL, 0, fmt, ap);
    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
    std::vector<char> buf(size + 1);
    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
    GGML_ASSERT(size2 == size);
    va_end(ap2);
    va_end(ap);
    return std::string(buf.data(), buf.size());
}

//
// key constants
//

#define KEY_FTYPE               "general.file_type"
#define KEY_NAME                "general.name"
#define KEY_DESCRIPTION         "general.description"
#define KEY_HAS_TEXT_ENC        "clip.has_text_encoder"
#define KEY_HAS_VIS_ENC         "clip.has_vision_encoder"
#define KEY_HAS_LLAVA_PROJ      "clip.has_llava_projector"
#define KEY_HAS_MINICPMV_PROJ   "clip.has_minicpmv_projector"
#define KEY_MINICPMV_VERSION    "clip.minicpmv_version"
#define KEY_USE_GELU            "clip.use_gelu"
#define KEY_N_EMBD              "clip.%s.embedding_length"
#define KEY_N_FF                "clip.%s.feed_forward_length"
#define KEY_N_BLOCK             "clip.%s.block_count"
#define KEY_N_HEAD              "clip.%s.attention.head_count"
#define KEY_LAYER_NORM_EPS      "clip.%s.attention.layer_norm_epsilon"
#define KEY_PROJ_DIM            "clip.%s.projection_dim"
#define KEY_TOKENS              "tokenizer.ggml.tokens"
#define KEY_N_POSITIONS         "clip.text.context_length"
#define KEY_IMAGE_SIZE          "clip.vision.image_size"
#define KEY_PATCH_SIZE          "clip.vision.patch_size"
#define KEY_IMAGE_MEAN          "clip.vision.image_mean"
#define KEY_IMAGE_STD           "clip.vision.image_std"
#define KEY_PROJ_TYPE           "clip.projector_type"
#define KEY_MM_PATCH_MERGE_TYPE   "clip.vision.mm_patch_merge_type"
#define KEY_IMAGE_GRID_PINPOINTS  "clip.vision.image_grid_pinpoints"
#define KEY_IMAGE_CROP_RESOLUTION "clip.vision.image_crop_resolution"

//
// tensor name constants
//

#define TN_TOKEN_EMBD      "%s.token_embd.weight"
#define TN_POS_EMBD        "%s.position_embd.weight"
#define TN_CLASS_EMBD      "v.class_embd"
#define TN_PATCH_EMBD      "v.patch_embd.weight"
#define TN_PATCH_BIAS      "v.patch_embd.bias"
#define TN_ATTN_K          "%s.blk.%d.attn_k.%s"
#define TN_ATTN_Q          "%s.blk.%d.attn_q.%s"
#define TN_ATTN_V          "%s.blk.%d.attn_v.%s"
#define TN_ATTN_OUTPUT     "%s.blk.%d.attn_out.%s"
#define TN_FFN_DOWN        "%s.blk.%d.ffn_down.%s"
#define TN_FFN_UP          "%s.blk.%d.ffn_up.%s"
#define TN_LN_1            "%s.blk.%d.ln1.%s"
#define TN_LN_2            "%s.blk.%d.ln2.%s"
#define TN_LN_PRE          "%s.pre_ln.%s"
#define TN_LN_POST         "%s.post_ln.%s"
#define TN_TEXT_PROJ       "text_projection.weight"
#define TN_VIS_PROJ        "visual_projection.weight"
#define TN_LLAVA_PROJ      "mm.%d.%s"
#define TN_MVLM_PROJ_MLP   "mm.model.mlp.%d.%s"
#define TN_MVLM_PROJ_BLOCK "mm.model.mb_block.%d.block.%d.%s"
#define TN_MVLM_PROJ_PEG   "mm.model.peg.%d.%s"
#define TN_IMAGE_NEWLINE   "model.image_newline"

#define TN_MINICPMV_POS_EMBD_K "resampler.pos_embed_k"
#define TN_MINICPMV_QUERY      "resampler.query"
#define TN_MINICPMV_PROJ       "resampler.proj.weight"
#define TN_MINICPMV_KV_PROJ    "resampler.kv.weight"
#define TN_MINICPMV_ATTN       "resampler.attn.%s.%s"
#define TN_MINICPMV_LN         "resampler.ln_%s.%s"

enum projector_type {
    PROJECTOR_TYPE_MLP,
    PROJECTOR_TYPE_MLP_NORM,
    PROJECTOR_TYPE_LDP,
    PROJECTOR_TYPE_LDPV2,
    PROJECTOR_TYPE_RESAMPLER,
    PROJECTOR_TYPE_UNKNOWN,
};

static std::map<projector_type, std::string> PROJECTOR_TYPE_NAMES = {
    { PROJECTOR_TYPE_MLP,       "mlp" },
    { PROJECTOR_TYPE_LDP,       "ldp" },
    { PROJECTOR_TYPE_LDPV2,     "ldpv2"},
    { PROJECTOR_TYPE_RESAMPLER, "resampler"},
};
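// note: PROJECTOR_TYPE_MLP_NORM has no name entry here on purpose - it is never stored in the gguf
// metadata, but detected later from the presence of the "mm.3.weight" tensor (see clip_model_load)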

//
// utilities to get data from a gguf file
//

static int get_key_idx(const gguf_context * ctx, const char * key) {
    int i = gguf_find_key(ctx, key);
    if (i == -1) {
        LOG_ERR("key %s not found in file\n", key);
        throw std::runtime_error(format("Missing required key: %s", key));
    }
    return i;
}

static uint32_t get_u32(const gguf_context * ctx, const std::string & key) {
    const int i = get_key_idx(ctx, key.c_str());
    return gguf_get_val_u32(ctx, i);
}

static float get_f32(const gguf_context * ctx, const std::string & key) {
    const int i = get_key_idx(ctx, key.c_str());
    return gguf_get_val_f32(ctx, i);
}

static struct ggml_tensor * get_tensor(struct ggml_context * ctx, const std::string & name) {
    struct ggml_tensor * cur = ggml_get_tensor(ctx, name.c_str());
    if (!cur) {
        throw std::runtime_error(format("%s: unable to find tensor %s\n", __func__, name.c_str()));
    }
    return cur;
}

static std::string get_ftype(int ftype) {
    return ggml_type_name(static_cast<ggml_type>(ftype));
}

static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
    switch (type) {
        case GGUF_TYPE_UINT8:   return std::to_string(((const uint8_t  *)data)[i]);
        case GGUF_TYPE_INT8:    return std::to_string(((const int8_t   *)data)[i]);
        case GGUF_TYPE_UINT16:  return std::to_string(((const uint16_t *)data)[i]);
        case GGUF_TYPE_INT16:   return std::to_string(((const int16_t  *)data)[i]);
        case GGUF_TYPE_UINT32:  return std::to_string(((const uint32_t *)data)[i]);
        case GGUF_TYPE_INT32:   return std::to_string(((const int32_t  *)data)[i]);
        case GGUF_TYPE_UINT64:  return std::to_string(((const uint64_t *)data)[i]);
        case GGUF_TYPE_INT64:   return std::to_string(((const int64_t  *)data)[i]);
        case GGUF_TYPE_FLOAT32: return std::to_string(((const float    *)data)[i]);
        case GGUF_TYPE_FLOAT64: return std::to_string(((const double   *)data)[i]);
        case GGUF_TYPE_BOOL:    return ((const bool *)data)[i] ? "true" : "false";
        default:                return format("unknown type %d", type);
    }
}

static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
    if (search.empty()) {
        return;
    }
    std::string builder;
    builder.reserve(s.length());
    size_t pos = 0;
    size_t last_pos = 0;
    while ((pos = s.find(search, last_pos)) != std::string::npos) {
        builder.append(s, last_pos, pos - last_pos);
        builder.append(replace);
        last_pos = pos + search.length();
    }
    builder.append(s, last_pos, std::string::npos);
    s = std::move(builder);
}

static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
    const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);

    switch (type) {
        case GGUF_TYPE_STRING:
            return gguf_get_val_str(ctx_gguf, i);
        case GGUF_TYPE_ARRAY:
            {
                const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
                int arr_n = gguf_get_arr_n(ctx_gguf, i);
                const void * data = gguf_get_arr_data(ctx_gguf, i);
                std::stringstream ss;
                ss << "[";
                for (int j = 0; j < arr_n; j++) {
                    if (arr_type == GGUF_TYPE_STRING) {
                        std::string val = gguf_get_arr_str(ctx_gguf, i, j);
                        // escape quotes
                        replace_all(val, "\\", "\\\\");
                        replace_all(val, "\"", "\\\"");
                        ss << '"' << val << '"';
                    } else if (arr_type == GGUF_TYPE_ARRAY) {
                        ss << "???";
                    } else {
                        ss << gguf_data_to_str(arr_type, data, j);
                    }
                    if (j < arr_n - 1) {
                        ss << ", ";
                    }
                }
                ss << "]";
                return ss.str();
            }
        default:
            return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);
    }
}

static void print_tensor_info(const ggml_tensor * tensor, const char * prefix = "") {
    size_t tensor_size = ggml_nbytes(tensor);
    LOG_INF("%s: n_dims = %d, name = %s, tensor_size=%zu, shape:[%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 "], type = %s\n",
            prefix, ggml_n_dims(tensor), tensor->name, tensor_size,
            tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], ggml_type_name(tensor->type));
}

static projector_type clip_projector_type_from_string(const std::string & name) {
    for (const auto & kv : PROJECTOR_TYPE_NAMES) { // NOLINT
        if (kv.second == name) {
            return kv.first;
        }
    }
    return PROJECTOR_TYPE_UNKNOWN;
}

#ifdef CLIP_DEBUG_FUNCTIONS
static void clip_image_write_image_to_ppm(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    // PPM header: P6 format, width, height, and max color value
    file << "P6\n" << img.nx << " " << img.ny << "\n255\n";

    // Write pixel data
    for (size_t i = 0; i < img.buf.size(); i += 3) {
        // PPM expects binary data in RGB format, which matches our image buffer
        file.write(reinterpret_cast<const char*>(&img.buf[i]), 3);
    }

    file.close();
}

static void clip_image_save_to_bmp(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());
        return;
    }

    int fileSize = 54 + 3 * img.nx * img.ny; // File header + info header + pixel data
    int bytesPerPixel = 3;
    int widthInBytes = img.nx * bytesPerPixel;
    int paddingAmount = (4 - (widthInBytes % 4)) % 4;
    int stride = widthInBytes + paddingAmount;

    // Bitmap file header
    unsigned char fileHeader[14] = {
        'B','M',    // Signature
        0,0,0,0,    // Image file size in bytes
        0,0,0,0,    // Reserved
        54,0,0,0    // Start of pixel array
    };

    // Total file size
    fileSize = 54 + (stride * img.ny);
    fileHeader[2] = (unsigned char)(fileSize);
    fileHeader[3] = (unsigned char)(fileSize >> 8);
    fileHeader[4] = (unsigned char)(fileSize >> 16);
    fileHeader[5] = (unsigned char)(fileSize >> 24);

    // Bitmap information header (BITMAPINFOHEADER)
    unsigned char infoHeader[40] = {
        40,0,0,0,   // Size of this header (40 bytes)
        0,0,0,0,    // Image width
        0,0,0,0,    // Image height
        1,0,        // Number of color planes
        24,0,       // Bits per pixel
        0,0,0,0,    // No compression
        0,0,0,0,    // Image size (can be 0 for no compression)
        0,0,0,0,    // X pixels per meter (not specified)
        0,0,0,0,    // Y pixels per meter (not specified)
        0,0,0,0,    // Total colors (color table not used)
        0,0,0,0     // Important colors (all are important)
    };

    // Width and height in the information header
    infoHeader[4] = (unsigned char)(img.nx);
    infoHeader[5] = (unsigned char)(img.nx >> 8);
    infoHeader[6] = (unsigned char)(img.nx >> 16);
    infoHeader[7] = (unsigned char)(img.nx >> 24);
    infoHeader[8] = (unsigned char)(img.ny);
    infoHeader[9] = (unsigned char)(img.ny >> 8);
    infoHeader[10] = (unsigned char)(img.ny >> 16);
    infoHeader[11] = (unsigned char)(img.ny >> 24);

    // Write file headers
    file.write(reinterpret_cast<char*>(fileHeader), sizeof(fileHeader));
    file.write(reinterpret_cast<char*>(infoHeader), sizeof(infoHeader));

    // Pixel data
    std::vector<unsigned char> padding(3, 0); // Max padding size to be added to each row
    for (int y = img.ny - 1; y >= 0; --y) { // BMP files are stored bottom-to-top
        for (int x = 0; x < img.nx; ++x) {
            // Each pixel
            size_t pixelIndex = (y * img.nx + x) * 3;
            unsigned char pixel[3] = {
                img.buf[pixelIndex + 2], // BMP stores pixels in BGR format
                img.buf[pixelIndex + 1],
                img.buf[pixelIndex]
            };
            file.write(reinterpret_cast<char*>(pixel), 3);
        }
        // Write padding for the row
        file.write(reinterpret_cast<char*>(padding.data()), paddingAmount);
    }

    file.close();
}

// debug function to convert f32 to u8
static void clip_image_convert_f32_to_u8(const clip_image_f32& src, clip_image_u8& dst) {
    dst.nx = src.nx;
    dst.ny = src.ny;
    dst.buf.resize(3 * src.nx * src.ny);

    for (size_t i = 0; i < src.buf.size(); ++i) {
        dst.buf[i] = static_cast<uint8_t>(std::min(std::max(int(src.buf[i] * 255.0f), 0), 255));
    }
}
#endif

//
// clip layers
//

struct clip_hparams {
    int32_t image_size;
    int32_t patch_size;
    int32_t hidden_size;
    int32_t n_intermediate;
    int32_t projection_dim;
    int32_t n_head;
    int32_t n_layer;

    float eps;

    char mm_patch_merge_type[32] = "flat"; // spatial_unpad or flat (default)

    int32_t image_grid_pinpoints[32];
    int32_t image_crop_resolution;
};
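// note: image_grid_pinpoints is a fixed-size buffer; its 32 int32 slots presumably hold up to
// 16 (width, height) pairs describing the candidate resolutions used by LLaVA-1.6 style image splitting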

struct clip_layer {
    // attention
    struct ggml_tensor * k_w;
    struct ggml_tensor * k_b;
    struct ggml_tensor * q_w;
    struct ggml_tensor * q_b;
    struct ggml_tensor * v_w;
    struct ggml_tensor * v_b;

    struct ggml_tensor * o_w;
    struct ggml_tensor * o_b;

    // layernorm 1
    struct ggml_tensor * ln_1_w;
    struct ggml_tensor * ln_1_b;

    // ff
    struct ggml_tensor * ff_i_w;
    struct ggml_tensor * ff_i_b;
    struct ggml_tensor * ff_o_w;
    struct ggml_tensor * ff_o_b;

    // layernorm 2
    struct ggml_tensor * ln_2_w;
    struct ggml_tensor * ln_2_b;
};

struct clip_vision_model {
    struct clip_hparams hparams;

    // embeddings
    struct ggml_tensor * class_embedding;
    struct ggml_tensor * patch_embeddings;
    struct ggml_tensor * patch_bias;
    struct ggml_tensor * position_embeddings;

    struct ggml_tensor * pre_ln_w;
    struct ggml_tensor * pre_ln_b;

    std::vector<clip_layer> layers;

    struct ggml_tensor * post_ln_w;
    struct ggml_tensor * post_ln_b;

    struct ggml_tensor * projection;

    // LLaVA projection
    struct ggml_tensor * mm_0_w = NULL;
    struct ggml_tensor * mm_0_b = NULL;
    struct ggml_tensor * mm_2_w = NULL;
    struct ggml_tensor * mm_2_b = NULL;

    struct ggml_tensor * image_newline = NULL;

    // Yi type models with mlp+normalization projection
    struct ggml_tensor * mm_1_w = NULL; // Yi type models have 0, 1, 3, 4
    struct ggml_tensor * mm_1_b = NULL;
    struct ggml_tensor * mm_3_w = NULL;
    struct ggml_tensor * mm_3_b = NULL;
    struct ggml_tensor * mm_4_w = NULL;
    struct ggml_tensor * mm_4_b = NULL;

    // MobileVLM projection
    struct ggml_tensor * mm_model_mlp_1_w;
    struct ggml_tensor * mm_model_mlp_1_b;
    struct ggml_tensor * mm_model_mlp_3_w;
    struct ggml_tensor * mm_model_mlp_3_b;
    struct ggml_tensor * mm_model_block_1_block_0_0_w;
    struct ggml_tensor * mm_model_block_1_block_0_1_w;
    struct ggml_tensor * mm_model_block_1_block_0_1_b;
    struct ggml_tensor * mm_model_block_1_block_1_fc1_w;
    struct ggml_tensor * mm_model_block_1_block_1_fc1_b;
    struct ggml_tensor * mm_model_block_1_block_1_fc2_w;
    struct ggml_tensor * mm_model_block_1_block_1_fc2_b;
    struct ggml_tensor * mm_model_block_1_block_2_0_w;
    struct ggml_tensor * mm_model_block_1_block_2_1_w;
    struct ggml_tensor * mm_model_block_1_block_2_1_b;
    struct ggml_tensor * mm_model_block_2_block_0_0_w;
    struct ggml_tensor * mm_model_block_2_block_0_1_w;
    struct ggml_tensor * mm_model_block_2_block_0_1_b;
    struct ggml_tensor * mm_model_block_2_block_1_fc1_w;
    struct ggml_tensor * mm_model_block_2_block_1_fc1_b;
    struct ggml_tensor * mm_model_block_2_block_1_fc2_w;
    struct ggml_tensor * mm_model_block_2_block_1_fc2_b;
    struct ggml_tensor * mm_model_block_2_block_2_0_w;
    struct ggml_tensor * mm_model_block_2_block_2_1_w;
    struct ggml_tensor * mm_model_block_2_block_2_1_b;

    // MobileVLM_V2 projection
    struct ggml_tensor * mm_model_mlp_0_w;
    struct ggml_tensor * mm_model_mlp_0_b;
    struct ggml_tensor * mm_model_mlp_2_w;
    struct ggml_tensor * mm_model_mlp_2_b;
    struct ggml_tensor * mm_model_peg_0_w;
    struct ggml_tensor * mm_model_peg_0_b;

    // MINICPMV projection
    struct ggml_tensor * mm_model_pos_embed_k;
    struct ggml_tensor * mm_model_query;
    struct ggml_tensor * mm_model_proj;
    struct ggml_tensor * mm_model_kv_proj;
    struct ggml_tensor * mm_model_attn_q_w;
    struct ggml_tensor * mm_model_attn_q_b;
    struct ggml_tensor * mm_model_attn_k_w;
    struct ggml_tensor * mm_model_attn_k_b;
    struct ggml_tensor * mm_model_attn_v_w;
    struct ggml_tensor * mm_model_attn_v_b;
    struct ggml_tensor * mm_model_attn_o_w;
    struct ggml_tensor * mm_model_attn_o_b;
    struct ggml_tensor * mm_model_ln_q_w;
    struct ggml_tensor * mm_model_ln_q_b;
    struct ggml_tensor * mm_model_ln_kv_w;
    struct ggml_tensor * mm_model_ln_kv_b;
    struct ggml_tensor * mm_model_ln_post_w;
    struct ggml_tensor * mm_model_ln_post_b;
};

struct clip_ctx {
    bool has_text_encoder       = false;
    bool has_vision_encoder     = false;
    bool has_llava_projector    = false;
    bool has_minicpmv_projector = false;
    int minicpmv_version = 2;

    struct clip_vision_model vision_model;
    projector_type proj_type = PROJECTOR_TYPE_MLP;

    float image_mean[3];
    float image_std[3];
    bool use_gelu = false;
    int32_t ftype = 1;

    bool has_class_embedding = true;
    bool has_pre_norm = true;
    bool has_post_norm = false;
    bool has_patch_bias = false;

    struct gguf_context * ctx_gguf;
    struct ggml_context * ctx_data;

    std::vector<uint8_t> buf_compute_meta;

    // memory buffers to evaluate the model
    ggml_backend_buffer_t params_buffer = NULL;

    ggml_backend_t backend       = NULL;
    ggml_gallocr_t compute_alloc = NULL;

    struct clip_image_size * load_image_size;
};

static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch * imgs, struct clip_image_size * load_image_size, bool is_inf = false) {
    if (!ctx->has_vision_encoder) {
        LOG_ERR("This gguf file seems to have no vision encoder\n");
        return nullptr;
    }

    const auto & model = ctx->vision_model;
    const auto & hparams = model.hparams;

    const int image_size = hparams.image_size;
    int image_size_width  = image_size;
    int image_size_height = image_size;
    if (ctx->has_minicpmv_projector) {
        if (load_image_size == nullptr) {
            load_image_size = clip_image_size_init();
        }
        LOG_DBG("%s: %d %d\n", __func__, load_image_size->width, load_image_size->height);
        image_size_width  = load_image_size->width;
        image_size_height = load_image_size->height;
        if (is_inf) {
            image_size_width  = imgs->data->nx;
            image_size_height = imgs->data->ny;
        }
    }
    const int patch_size    = hparams.patch_size;
    const int num_patches   = ((image_size_width / patch_size) * (image_size_height / patch_size));
    const int num_positions = num_patches + (ctx->has_class_embedding ? 1 : 0);
    const int hidden_size   = hparams.hidden_size;
    const int n_head        = hparams.n_head;
    const int d_head        = hidden_size / n_head;
    int n_layer             = hparams.n_layer;
    const float eps         = hparams.eps;

    const int batch_size = imgs->size;

    if (ctx->has_llava_projector || ctx->has_minicpmv_projector) {
        GGML_ASSERT(batch_size == 1);
    }

    struct ggml_init_params params = {
        /*.mem_size   =*/ ctx->buf_compute_meta.size(),
        /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
        /*.no_alloc   =*/ true,
    };

    struct ggml_context * ctx0 = ggml_init(params);
    struct ggml_cgraph * gf = ggml_new_graph(ctx0);

    struct ggml_tensor * inp_raw = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, image_size_width, image_size_height, 3, batch_size);
    ggml_set_name(inp_raw, "inp_raw");
    ggml_set_input(inp_raw);

    struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings, inp_raw, patch_size, patch_size, 0, 0, 1, 1);

    inp = ggml_reshape_3d(ctx0, inp, num_patches, hidden_size, batch_size);
    inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 0, 2, 3));
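    // the conv output has ne = [n_patches_w, n_patches_h, hidden_size, batch]; the reshape flattens
    // the patch grid into one dimension and the permute transposes the result to
    // ne = [hidden_size, num_patches, batch], i.e. one hidden_size-long embedding per patch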

    if (ctx->has_patch_bias) {
        // inp = ggml_add(ctx0, inp, ggml_repeat(ctx0, model.patch_bias, inp));
        inp = ggml_add(ctx0, inp, model.patch_bias);
    }

    struct ggml_tensor * embeddings = inp;
    struct ggml_tensor * pos_embed  = nullptr;

    if (ctx->has_llava_projector) {
        // concat class_embeddings and patch_embeddings
        if (ctx->has_class_embedding) {
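            // the concat is implemented as two ggml_acc writes into a fresh [hidden, num_positions, batch]
            // input tensor (presumably zero-filled by the caller at eval time): the class embedding lands
            // in row 0, and the patch embeddings start one row later (at byte offset class_embedding->nb[1])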
            embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
            ggml_set_name(embeddings, "embeddings");
            ggml_set_input(embeddings);
            embeddings = ggml_acc(ctx0, embeddings, model.class_embedding,
                    embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0);
            embeddings = ggml_acc(ctx0, embeddings, inp,
                    embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]);
        }
    }

    struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions);
    ggml_set_name(positions, "positions");
    ggml_set_input(positions);
    embeddings =
        ggml_add(ctx0, embeddings, ggml_get_rows(ctx0, model.position_embeddings, positions));

    if (ctx->has_minicpmv_projector) {
        int pos_w = image_size_width/patch_size;
        int pos_h = image_size_height/patch_size;
        if (ctx->minicpmv_version == 2) {
            pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 4096, pos_w * pos_h, 1);
        }
        else if (ctx->minicpmv_version == 3) {
            pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 3584, pos_w * pos_h, 1);
        }
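        // 4096 / 3584 are the resampler embedding widths hard-coded per minicpmv_version (matching the
        // hidden size of the LLM the resampler feeds); note that for any other version pos_embed stays
        // nullptr and the ggml_set_name below would dereference a null pointer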
        ggml_set_name(pos_embed, "pos_embed");
        ggml_set_input(pos_embed);
    }

    // pre-layernorm
    if (ctx->has_pre_norm) {
        embeddings = ggml_norm(ctx0, embeddings, eps);
        ggml_set_name(embeddings, "pre_ln");

        embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.pre_ln_w), model.pre_ln_b);
    }

    // loop over layers
    if (ctx->has_minicpmv_projector) {
        n_layer += 1;
    }
    for (int il = 0; il < n_layer - 1; il++) {
        struct ggml_tensor * cur = embeddings; // embeddings = residual, cur = hidden_states

        //const size_t nb_q_w = model.layers[il].q_w->nb[0];

        // layernorm1
        {
            cur = ggml_norm(ctx0, cur, eps);

            cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_1_w),
                           model.layers[il].ln_1_b);
        }

        // self-attention
        {
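            // standard multi-head attention: project to Q/K/V, split hidden_size into n_head heads of
            // d_head each, compute softmax(Q*K^T / sqrt(d_head)) * V per head, then merge the heads back
            // into a [hidden_size, num_positions, batch] tensor; Q is pre-scaled instead of scaling K*Q^T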
            struct ggml_tensor * Q =
                ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].q_w, cur), model.layers[il].q_b);

            Q = ggml_scale_inplace(ctx0, Q, 1.0f / sqrt((float)d_head));
            Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_positions, batch_size);
            Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3));
            Q = ggml_reshape_3d(ctx0, Q, d_head, num_positions, n_head * batch_size);

            struct ggml_tensor * K =
                ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].k_w, cur), model.layers[il].k_b);

            K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size);
            K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3));
            K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size);

            struct ggml_tensor * V =
                ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].v_w, cur), model.layers[il].v_b);

            V = ggml_reshape_4d(ctx0, V, d_head, n_head, num_positions, batch_size);
            V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3));
            V = ggml_reshape_3d(ctx0, V, num_positions, d_head, n_head * batch_size);

            struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
            KQ = ggml_soft_max_inplace(ctx0, KQ);

            struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ);
            KQV = ggml_reshape_4d(ctx0, KQV, d_head, num_positions, n_head, batch_size);
            KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3);

            cur = ggml_cont_3d(ctx0, KQV, hidden_size, num_positions, batch_size);
        }

        // attention output
        cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].o_w, cur), model.layers[il].o_b);
        // re-add the layer input, i.e. the residual connection
        cur = ggml_add(ctx0, cur, embeddings);

        embeddings = cur; // embeddings = residual, cur = hidden_states

        // layernorm2
        {
            cur = ggml_norm(ctx0, cur, eps);
            cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_2_w), model.layers[il].ln_2_b);
        }

        cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur);
        cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b);

        if (ctx->use_gelu) {
            cur = ggml_gelu_inplace(ctx0, cur);
        } else {
            cur = ggml_gelu_quick_inplace(ctx0, cur);
        }

        cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur);
        cur = ggml_add(ctx0, cur, model.layers[il].ff_o_b);

        // residual 2
        cur = ggml_add(ctx0, embeddings, cur);

        embeddings = cur;
    }

    // post-layernorm
    if (ctx->has_post_norm) {
        embeddings = ggml_norm(ctx0, embeddings, eps);
        ggml_set_name(embeddings, "post_ln");

        embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);
    }

    // llava projector
    if (ctx->has_llava_projector) {
        embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);

        struct ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_patches);
        ggml_set_name(patches, "patches");
        ggml_set_input(patches);

        // shape [1, 576, 1024]
        // ne is whcn, ne = [1024, 576, 1, 1]
        embeddings = ggml_get_rows(ctx0, embeddings, patches);

        // print_tensor_info(embeddings, "embeddings");

        // llava projector
        if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
            embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
            embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);

            embeddings = ggml_gelu(ctx0, embeddings);
            embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
            embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
        }
        else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
            embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
            embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
            // ggml_tensor_printf(embeddings, "mm_0_w",0,true,false);
            // First LayerNorm
            embeddings = ggml_norm(ctx0, embeddings, eps);
            embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_1_w),
                                  model.mm_1_b);

            // GELU activation
            embeddings = ggml_gelu(ctx0, embeddings);

            // Second linear layer
            embeddings = ggml_mul_mat(ctx0, model.mm_3_w, embeddings);
            embeddings = ggml_add(ctx0, embeddings, model.mm_3_b);

            // Second LayerNorm
            embeddings = ggml_norm(ctx0, embeddings, eps);
            embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_4_w),
                                  model.mm_4_b);
        }
        else if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
            // MobileVLM projector
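            // LDP (lightweight downsample projector): a 2-layer MLP followed by two conv blocks, each with
            // a depthwise 3x3 conv, a layer norm and an SE-style gate (global avg pool -> fc -> relu ->
            // fc -> hardsigmoid -> channel-wise multiply); block 1 keeps the resolution and adds a
            // residual, block 2 downsamples with stride 2. n_patch = 24 assumes a 24x24 patch grid
            // (e.g. a 336px image with 14px patches).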
            int n_patch = 24;
            struct ggml_tensor * mlp_1 = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, embeddings);
            mlp_1 = ggml_add(ctx0, mlp_1, model.mm_model_mlp_1_b);
            mlp_1 = ggml_gelu(ctx0, mlp_1);
            struct ggml_tensor * mlp_3 = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, mlp_1);
            mlp_3 = ggml_add(ctx0, mlp_3, model.mm_model_mlp_3_b);
            // mlp_3 shape = [1, 576, 2048], ne = [2048, 576, 1, 1]

            // block 1
            struct ggml_tensor * block_1 = nullptr;
            {
                // transpose from [1, 576, 2048] --> [1, 2048, 576] --> [1, 2048, 24, 24]
                mlp_3 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_3, 1, 0, 2, 3));
                mlp_3 = ggml_reshape_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]);
                // stride = 1, padding = 1, bias is nullptr
                block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1);

                // layer norm
                // // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
                block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
                // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
                block_1 = ggml_norm(ctx0, block_1, eps);
                block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_0_1_w), model.mm_model_block_1_block_0_1_b);
                block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));

                // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
                // hardswish
                struct ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);

                block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
                // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
                // pointwise conv
                block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
                block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc1_w, block_1);
                block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc1_b);
                block_1 = ggml_relu(ctx0, block_1);
                block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc2_w, block_1);
                block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc2_b);
                block_1 = ggml_hardsigmoid(ctx0, block_1);
                // block_1_hw shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1], block_1 shape = [1, 2048], ne = [2048, 1, 1, 1]
                block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
                block_1 = ggml_mul(ctx0, block_1_hw, block_1);

                int w = block_1->ne[0], h = block_1->ne[1];
                block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
                block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));

                // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1]
                block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_2_0_w, block_1);
                block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);

                // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
                block_1 = ggml_norm(ctx0, block_1, eps);
                block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_2_1_w), model.mm_model_block_1_block_2_1_b);
                block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
                // block1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
                // residual
                block_1 = ggml_add(ctx0, mlp_3, block_1);
            }

            // block_2
            {
                // stride = 2
                block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1);

                // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
                // layer norm
                block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
                // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
                block_1 = ggml_norm(ctx0, block_1, eps);
                block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_0_1_w), model.mm_model_block_2_block_0_1_b);
                block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
                // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
                // hardswish
                struct ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);
                // not sure the parameters are right for globalAvgPooling
                block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
                // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
                // pointwise conv
                block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
                block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc1_w, block_1);
                block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc1_b);
                block_1 = ggml_relu(ctx0, block_1);
                block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc2_w, block_1);
                block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc2_b);
                block_1 = ggml_hardsigmoid(ctx0, block_1);

                // block_1_hw shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1], block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
                block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
                block_1 = ggml_mul(ctx0, block_1_hw, block_1);

                int w = block_1->ne[0], h = block_1->ne[1];
                block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
                block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));
                // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1]
                block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_2_0_w, block_1);
                block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);

                // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
                block_1 = ggml_norm(ctx0, block_1, eps);
                block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_2_1_w), model.mm_model_block_2_block_2_1_b);
                block_1 = ggml_reshape_3d(ctx0, block_1, block_1->ne[0], block_1->ne[1] * block_1->ne[2], block_1->ne[3]);
                // block_1 shape = [1, 144, 2048], ne = [2048, 144, 1]
            }
            embeddings = block_1;
        }
        else if (ctx->proj_type == PROJECTOR_TYPE_LDPV2)
        {
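            // LDPv2: simpler than LDP - a 2-layer MLP, a 2x2 stride-2 average pool to downsample the
            // patch grid, then a depthwise 3x3 conv acting as a positional-encoding generator (PEG)
            // whose output is added back to its input as a residual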
            int n_patch = 24;
            struct ggml_tensor * mlp_0 = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
            mlp_0 = ggml_add(ctx0, mlp_0, model.mm_model_mlp_0_b);
            mlp_0 = ggml_gelu(ctx0, mlp_0);
            struct ggml_tensor * mlp_2 = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, mlp_0);
            mlp_2 = ggml_add(ctx0, mlp_2, model.mm_model_mlp_2_b);
            // mlp_2 ne = [2048, 576, 1, 1]
            // // AVG Pool Layer 2*2, strides = 2
            mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 0, 2, 3));
            // mlp_2 ne = [576, 2048, 1, 1]
            mlp_2 = ggml_reshape_4d(ctx0, mlp_2, n_patch, n_patch, mlp_2->ne[1], mlp_2->ne[2]);
            // mlp_2 ne [24, 24, 2048, 1]
            mlp_2 = ggml_pool_2d(ctx0, mlp_2, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0);
            // weight ne = [3, 3, 2048, 1]
            struct ggml_tensor * peg_0 = ggml_conv_depthwise_2d(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 1, 1, 1, 1, 1);
            peg_0 = ggml_cont(ctx0, ggml_permute(ctx0, peg_0, 1, 2, 0, 3));
            peg_0 = ggml_add(ctx0, peg_0, model.mm_model_peg_0_b);
            mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 2, 0, 3));
            peg_0 = ggml_add(ctx0, peg_0, mlp_2);
            peg_0 = ggml_reshape_3d(ctx0, peg_0, peg_0->ne[0], peg_0->ne[1] * peg_0->ne[2], peg_0->ne[3]);
            embeddings = peg_0;
        }
        else {
            GGML_ABORT("fatal error");
        }
    }
    // minicpmv projector
    else if (ctx->has_minicpmv_projector)
    {
        if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) {
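            // perceiver-style resampler: a fixed set of learned query vectors cross-attends to the image
            // features; the encoder output is projected into the resampler width (kv_proj), the keys get
            // a 2D positional embedding added, and the attended output is layer-normed and projected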
            struct ggml_tensor * q = model.mm_model_query;
            { // layernorm
                q = ggml_norm(ctx0, q, eps);
                q = ggml_add(ctx0, ggml_mul(ctx0, q, model.mm_model_ln_q_w), model.mm_model_ln_q_b);
            }
            struct ggml_tensor * v = ggml_mul_mat(ctx0, model.mm_model_kv_proj, embeddings);
            { // layernorm
                v = ggml_norm(ctx0, v, eps);
                v = ggml_add(ctx0, ggml_mul(ctx0, v, model.mm_model_ln_kv_w), model.mm_model_ln_kv_b);
            }
            struct ggml_tensor * k;
            { // position
                // q = ggml_add(ctx0, q, model.mm_model_pos_embed);
                k = ggml_add(ctx0, v, pos_embed);
            }

            { // attention
                int hidden_size = 4096;
                const int d_head = 128;
                int n_head = hidden_size/d_head;
                int num_query = 96;
                if (ctx->minicpmv_version == 2) {
                    hidden_size = 4096;
                    n_head = hidden_size/d_head;
                    num_query = 96;
                }
                else if (ctx->minicpmv_version == 3) {
                    hidden_size = 3584;
                    n_head = hidden_size/d_head;
                    num_query = 64;
                }

                struct ggml_tensor * Q = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_q_w, q), model.mm_model_attn_q_b);
                Q = ggml_scale_inplace(ctx0, Q, 1.0f / sqrt((float)d_head));
                struct ggml_tensor * K = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_k_w, k), model.mm_model_attn_k_b);
                struct ggml_tensor * V = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_v_w, v), model.mm_model_attn_v_b);
                // permute
                Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_query, batch_size);
                Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3));
                Q = ggml_reshape_3d(ctx0, Q, d_head, num_query, n_head * batch_size);
                K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size);
                K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3));
                K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size);
                V = ggml_reshape_4d(ctx0, V, d_head, n_head, num_positions, batch_size);
                V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3));
                V = ggml_reshape_3d(ctx0, V, num_positions, d_head, n_head * batch_size);
                struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
                KQ = ggml_soft_max_inplace(ctx0, KQ);
                struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ);
                KQV = ggml_reshape_4d(ctx0, KQV, d_head, num_query, n_head, batch_size);
                KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
                KQV = ggml_cont_3d(ctx0, KQV, hidden_size, num_query, batch_size);

                embeddings = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_o_w, KQV), model.mm_model_attn_o_b);
            }
            { // layernorm
                embeddings = ggml_norm(ctx0, embeddings, eps);
                embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_model_ln_post_w), model.mm_model_ln_post_b);
            }
            embeddings = ggml_mul_mat(ctx0, model.mm_model_proj, embeddings);
        }
        else {
            GGML_ASSERT(false);
        }
    }

    // build the graph
    ggml_build_forward_expand(gf, embeddings);

    ggml_free(ctx0);

    return gf;
}

// read and create ggml_context containing the tensors and their data
struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
    struct ggml_context * meta = NULL;

    struct gguf_init_params params = {
        /*.no_alloc = */ true,
        /*.ctx      = */ &meta,
    };

    struct gguf_context * ctx = gguf_init_from_file(fname, params);
    if (!ctx) {
        throw std::runtime_error(format("%s: failed to load CLIP model from %s. Does this file exist?\n", __func__, fname));
    }

    if (verbosity >= 1) {
        const int n_tensors = gguf_get_n_tensors(ctx);
        const int n_kv = gguf_get_n_kv(ctx);
        const int ftype = get_u32(ctx, KEY_FTYPE);
        const std::string ftype_str = get_ftype(ftype);
        const int idx_desc = get_key_idx(ctx, KEY_DESCRIPTION);
        const std::string description = gguf_get_val_str(ctx, idx_desc);
        const int idx_name = gguf_find_key(ctx, KEY_NAME);
        if (idx_name != -1) { // make name optional for now, as some of the uploaded models are missing it due to a bug
            const std::string name = gguf_get_val_str(ctx, idx_name);
            LOG_INF("%s: model name: %s\n", __func__, name.c_str());
        }
        LOG_INF("%s: description: %s\n", __func__, description.c_str());
        LOG_INF("%s: GGUF version: %d\n", __func__, gguf_get_version(ctx));
        LOG_INF("%s: alignment: %zu\n", __func__, gguf_get_alignment(ctx));
        LOG_INF("%s: n_tensors: %d\n", __func__, n_tensors);
        LOG_INF("%s: n_kv: %d\n", __func__, n_kv);
        LOG_INF("%s: ftype: %s\n", __func__, ftype_str.c_str());
        LOG_INF("\n");
    }
    const int n_tensors = gguf_get_n_tensors(ctx);

    // kv
    const int n_kv = gguf_get_n_kv(ctx);
    LOG_INF("%s: loaded meta data with %d key-value pairs and %d tensors from %s\n",
        __func__, n_kv, n_tensors, fname);
    {
        std::map<enum ggml_type, uint32_t> n_type;

        for (int i = 0; i < n_tensors; i++) {
            enum ggml_type type = gguf_get_tensor_type(ctx, i);

            n_type[type]++;
        }

        LOG_INF("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
        for (int i = 0; i < n_kv; i++) {
            const char * name           = gguf_get_key(ctx, i);
            const enum gguf_type type   = gguf_get_kv_type(ctx, i);
            const std::string type_name =
                type == GGUF_TYPE_ARRAY
                ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(ctx, i)), gguf_get_arr_n(ctx, i))
                : gguf_type_name(type);

            std::string value          = gguf_kv_to_str(ctx, i);
            const size_t MAX_VALUE_LEN = 40;
            if (value.size() > MAX_VALUE_LEN) {
                value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
            }
            replace_all(value, "\n", "\\n");

            LOG_INF("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
        }

        // print type counts
        for (auto & kv : n_type) {
            if (kv.second == 0) {
                continue;
            }

            LOG_INF("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second);
        }
    }

    // data
    size_t model_size = 0;
    {
        for (int i = 0; i < n_tensors; ++i) {
            const char * name = gguf_get_tensor_name(ctx, i);
            const size_t offset = gguf_get_tensor_offset(ctx, i);
            enum ggml_type type = gguf_get_tensor_type(ctx, i);
            struct ggml_tensor * cur = ggml_get_tensor(meta, name);
            size_t tensor_size = ggml_nbytes(cur);
            model_size += tensor_size;
            if (verbosity >= 3) {
                LOG_INF("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
                       __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
            }
        }
    }

    clip_ctx * new_clip = new clip_ctx{};

    // update projector type
    {
        int idx = gguf_find_key(ctx, KEY_PROJ_TYPE);
        if (idx != -1) {
            const std::string proj_type = gguf_get_val_str(ctx, idx);
            new_clip->proj_type = clip_projector_type_from_string(proj_type);
        } else {
            new_clip->proj_type = PROJECTOR_TYPE_MLP;
        }

        if (new_clip->proj_type == PROJECTOR_TYPE_MLP) {
            if (gguf_find_tensor(ctx, format(TN_LLAVA_PROJ, 3, "weight").c_str()) != -1) {
                new_clip->proj_type = PROJECTOR_TYPE_MLP_NORM;
            }
        }
    }

#ifdef GGML_USE_CUDA
    new_clip->backend = ggml_backend_cuda_init(0);
    LOG_INF("%s: CLIP using CUDA backend\n", __func__);
#endif

#ifdef GGML_USE_METAL
    new_clip->backend = ggml_backend_metal_init();
    LOG_INF("%s: CLIP using Metal backend\n", __func__);
#endif

#ifdef GGML_USE_CANN
    new_clip->backend = ggml_backend_cann_init(0);
    LOG_INF("%s: CLIP using CANN backend\n", __func__);
#endif

#ifdef GGML_USE_VULKAN
    new_clip->backend = ggml_backend_vk_init(0);
    LOG_INF("%s: CLIP using Vulkan backend\n", __func__);
#endif

#ifdef GGML_USE_SYCL
    new_clip->backend = ggml_backend_sycl_init(0);
    LOG_INF("%s: CLIP using SYCL backend\n", __func__);
#endif
  1037. if (!new_clip->backend) {
  1038. new_clip->backend = ggml_backend_cpu_init();
  1039. LOG_INF("%s: CLIP using CPU backend\n", __func__);
  1040. }

    // model size and capabilities
    {
        int idx = get_key_idx(ctx, KEY_HAS_TEXT_ENC);
        new_clip->has_text_encoder = gguf_get_val_bool(ctx, idx);

        idx = get_key_idx(ctx, KEY_HAS_VIS_ENC);
        new_clip->has_vision_encoder = gguf_get_val_bool(ctx, idx);

        idx = gguf_find_key(ctx, KEY_HAS_LLAVA_PROJ);
        if (idx != -1) {
            new_clip->has_llava_projector = gguf_get_val_bool(ctx, idx);
        }

        idx = gguf_find_key(ctx, KEY_HAS_MINICPMV_PROJ);
        if (idx != -1) {
            new_clip->has_minicpmv_projector = gguf_get_val_bool(ctx, idx);
        }

        idx = gguf_find_key(ctx, KEY_MINICPMV_VERSION);
        if (idx != -1) {
            new_clip->minicpmv_version = gguf_get_val_i32(ctx, idx);
        }

        // GGML_ASSERT(new_clip->has_llava_projector); // see monatis/clip.cpp for image and/or text encoding for semantic search

        GGML_ASSERT(new_clip->has_vision_encoder);
        GGML_ASSERT(!new_clip->has_text_encoder);

        idx = get_key_idx(ctx, KEY_USE_GELU);
        new_clip->use_gelu = gguf_get_val_bool(ctx, idx);

        if (verbosity >= 1) {
            LOG_INF("%s: text_encoder:       %d\n", __func__, new_clip->has_text_encoder);
            LOG_INF("%s: vision_encoder:     %d\n", __func__, new_clip->has_vision_encoder);
            LOG_INF("%s: llava_projector:    %d\n", __func__, new_clip->has_llava_projector);
            LOG_INF("%s: minicpmv_projector: %d\n", __func__, new_clip->has_minicpmv_projector);
            LOG_INF("%s: model size:         %.2f MB\n", __func__, model_size / 1024.0 / 1024.0);
            LOG_INF("%s: metadata size:      %.2f MB\n", __func__, ggml_get_mem_size(meta) / 1024.0 / 1024.0);
        }
    }

    LOG_INF("%s: params backend buffer size = % 6.2f MB (%i tensors)\n", __func__, model_size / (1024.0 * 1024.0), n_tensors);

    // load tensors
    {
        std::vector<uint8_t> read_buf;

        struct ggml_init_params params = {
            /*.mem_size =*/ (n_tensors + 1) * ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc =*/ true,
        };

        new_clip->ctx_data = ggml_init(params);
        if (!new_clip->ctx_data) {
            LOG_ERR("%s: ggml_init() failed\n", __func__);
            clip_free(new_clip);
            gguf_free(ctx);
            return nullptr;
        }
#ifdef _WIN32
        int wlen = MultiByteToWideChar(CP_UTF8, 0, fname, -1, NULL, 0);
        if (!wlen) {
            // free the partially constructed context instead of leaking it
            clip_free(new_clip);
            gguf_free(ctx);
            return nullptr;
        }
        wchar_t * wbuf = (wchar_t *) malloc(wlen * sizeof(wchar_t));
        wlen = MultiByteToWideChar(CP_UTF8, 0, fname, -1, wbuf, wlen);
        if (!wlen) {
            free(wbuf);
            clip_free(new_clip);
            gguf_free(ctx);
            return nullptr;
        }
#if __GLIBCXX__
        int fd = _wopen(wbuf, _O_RDONLY | _O_BINARY);
        __gnu_cxx::stdio_filebuf<char> buffer(fd, std::ios_base::in);
        std::istream fin(&buffer);
#else // MSVC
        // unused in our current build
        auto fin = std::ifstream(wbuf, std::ios::binary);
#endif
        free(wbuf);
#else
        auto fin = std::ifstream(fname, std::ios::binary);
#endif
        if (!fin) {
            LOG_ERR("cannot open model file for loading tensors\n");
            clip_free(new_clip);
            gguf_free(ctx);
            return nullptr;
        }

        // add tensors to context
        for (int i = 0; i < n_tensors; ++i) {
            const char * name = gguf_get_tensor_name(ctx, i);
            struct ggml_tensor * t = ggml_get_tensor(meta, name);
            struct ggml_tensor * cur = ggml_dup_tensor(new_clip->ctx_data, t);
            ggml_set_name(cur, name);
        }

        // alloc memory and offload data
        new_clip->params_buffer = ggml_backend_alloc_ctx_tensors(new_clip->ctx_data, new_clip->backend);
        for (int i = 0; i < n_tensors; ++i) {
            const char * name = gguf_get_tensor_name(ctx, i);
            struct ggml_tensor * cur = ggml_get_tensor(new_clip->ctx_data, name);
            const size_t offset = gguf_get_data_offset(ctx) + gguf_get_tensor_offset(ctx, i);
            fin.seekg(offset, std::ios::beg);
            if (!fin) {
                LOG_ERR("%s: failed to seek for tensor %s\n", __func__, name);
                clip_free(new_clip);
                gguf_free(ctx);
                return nullptr;
            }
            const size_t num_bytes = ggml_nbytes(cur); // use size_t: ggml_nbytes() returns size_t and large tensors can exceed INT_MAX
            if (ggml_backend_buffer_is_host(new_clip->params_buffer)) {
                // for the CPU and Metal backend, we can read directly into the tensor
                fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
            } else {
                // read into a temporary buffer first, then copy to device memory
                read_buf.resize(num_bytes);
                fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
                ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
            }
        }
#if defined(_WIN32) && defined(__GLIBCXX__)
        close(fd);
#else
        fin.close();
#endif
    }

    // vision model
    if (new_clip->has_vision_encoder) {
        // load vision model
        auto & vision_model = new_clip->vision_model;
        auto & hparams = vision_model.hparams;
        hparams.hidden_size    = get_u32(ctx, format(KEY_N_EMBD, "vision"));
        hparams.n_head         = get_u32(ctx, format(KEY_N_HEAD, "vision"));
        hparams.n_intermediate = get_u32(ctx, format(KEY_N_FF, "vision"));
        hparams.n_layer        = get_u32(ctx, format(KEY_N_BLOCK, "vision"));
        hparams.image_size     = get_u32(ctx, KEY_IMAGE_SIZE);
        hparams.patch_size     = get_u32(ctx, KEY_PATCH_SIZE);
        hparams.projection_dim = get_u32(ctx, format(KEY_PROJ_DIM, "vision"));
        hparams.eps            = get_f32(ctx, format(KEY_LAYER_NORM_EPS, "vision"));

        try {
            int idx = get_key_idx(ctx, KEY_IMAGE_GRID_PINPOINTS);
            int n = gguf_get_arr_n(ctx, idx);
            const int32_t * pinpoints = (const int32_t *)gguf_get_arr_data(ctx, idx);
            for (int i = 0; i < 32 && i < n && pinpoints[i] != 0; ++i) {
                hparams.image_grid_pinpoints[i] = pinpoints[i];
            }
            if (n < 32) {
                hparams.image_grid_pinpoints[n] = 0;
            }
        } catch (std::runtime_error & /*e*/) {
            hparams.image_grid_pinpoints[0] = 0;
        }

        try {
            int idx = get_key_idx(ctx, KEY_MM_PATCH_MERGE_TYPE);
            strcpy(hparams.mm_patch_merge_type, gguf_get_val_str(ctx, idx));
        } catch (std::runtime_error & /*e*/) {
            strcpy(hparams.mm_patch_merge_type, "flat");
        }

        try {
            hparams.image_crop_resolution = get_u32(ctx, KEY_IMAGE_CROP_RESOLUTION); // llava-1.6
        } catch (const std::exception & /*e*/) {
            hparams.image_crop_resolution = hparams.image_size;
        }

        int idx_mean = get_key_idx(ctx, KEY_IMAGE_MEAN);
        int idx_std  = get_key_idx(ctx, KEY_IMAGE_STD);

        const float * mean_data = (const float *)gguf_get_arr_data(ctx, idx_mean);
        const float * std_data  = (const float *)gguf_get_arr_data(ctx, idx_std);

        for (int i = 0; i < 3; ++i) {
            new_clip->image_mean[i] = mean_data[i];
            new_clip->image_std[i]  = std_data[i];
        }

        if (verbosity >= 2) {
            LOG_INF("\n%s: vision model hparams\n", __func__);
            LOG_INF("image_size         %d\n", hparams.image_size);
            LOG_INF("patch_size         %d\n", hparams.patch_size);
            LOG_INF("v_hidden_size      %d\n", hparams.hidden_size);
            LOG_INF("v_n_intermediate   %d\n", hparams.n_intermediate);
            LOG_INF("v_projection_dim   %d\n", hparams.projection_dim);
            LOG_INF("v_n_head           %d\n", hparams.n_head);
            LOG_INF("v_n_layer          %d\n", hparams.n_layer);
            LOG_INF("v_eps              %f\n", hparams.eps);
            LOG_INF("v_image_mean       %f %f %f\n", new_clip->image_mean[0], new_clip->image_mean[1], new_clip->image_mean[2]);
            LOG_INF("v_image_std        %f %f %f\n", new_clip->image_std[0], new_clip->image_std[1], new_clip->image_std[2]);
            LOG_INF("v_image_grid_pinpoints: ");
            for (int i = 0; i < 32 && (hparams.image_grid_pinpoints[i] != 0); ++i) {
                LOG_INF("%d ", hparams.image_grid_pinpoints[i]);
            }
            LOG_INF("\n");
            LOG_INF("v_mm_patch_merge_type: %s\n", hparams.mm_patch_merge_type);
        }

        try {
            vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD);
            new_clip->has_class_embedding = true;
        } catch (const std::exception & /*e*/) {
            new_clip->has_class_embedding = false;
        }

        try {
            vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight"));
            vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias"));
            new_clip->has_pre_norm = true;
        } catch (std::exception & /*e*/) {
            new_clip->has_pre_norm = false;
        }

        try {
            vision_model.post_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "weight"));
            vision_model.post_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "bias"));
            new_clip->has_post_norm = true;
        } catch (std::exception & /*e*/) {
            new_clip->has_post_norm = false;
        }

        try {
            vision_model.patch_bias = get_tensor(new_clip->ctx_data, TN_PATCH_BIAS);
            new_clip->has_patch_bias = true;
        } catch (std::exception & /*e*/) {
            new_clip->has_patch_bias = false;
        }

        try {
            vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD);
            vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v"));
        } catch (const std::exception & /*e*/) {
            LOG_ERR("%s: failed to load vision model tensors\n", __func__);
        }

        // LLaVA projection
        if (new_clip->proj_type == PROJECTOR_TYPE_MLP || new_clip->proj_type == PROJECTOR_TYPE_MLP_NORM) {
            vision_model.mm_0_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "weight"));
            vision_model.mm_0_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "bias"));
            try {
                // Yi-type llava
                vision_model.mm_1_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 1, "weight"));
                vision_model.mm_1_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 1, "bias"));
            } catch (std::runtime_error & /*e*/) { }
            try {
                // missing in Yi-type llava
                vision_model.mm_2_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "weight"));
                vision_model.mm_2_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "bias"));
            } catch (std::runtime_error & /*e*/) { }
            try {
                // Yi-type llava
                vision_model.mm_3_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "weight"));
                vision_model.mm_3_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "bias"));
            } catch (std::runtime_error & /*e*/) { }
            try {
                // Yi-type llava
                vision_model.mm_4_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "weight"));
                vision_model.mm_4_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "bias"));
            } catch (std::runtime_error & /*e*/) { }
            try {
                vision_model.image_newline = get_tensor(new_clip->ctx_data, TN_IMAGE_NEWLINE);
                // LOG_INF("%s: image_newline tensor (llava-1.6) found\n", __func__);
            } catch (std::runtime_error & /*e*/) { }
        } else if (new_clip->proj_type == PROJECTOR_TYPE_LDP) {
            // MobileVLM projection
            vision_model.mm_model_mlp_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "weight"));
            vision_model.mm_model_mlp_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "bias"));
            vision_model.mm_model_mlp_3_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 3, "weight"));
            vision_model.mm_model_mlp_3_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 3, "bias"));
            vision_model.mm_model_block_1_block_0_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight"));
            vision_model.mm_model_block_1_block_0_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight"));
            vision_model.mm_model_block_1_block_0_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias"));
            vision_model.mm_model_block_1_block_1_fc1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight"));
            vision_model.mm_model_block_1_block_1_fc1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias"));
            vision_model.mm_model_block_1_block_1_fc2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight"));
            vision_model.mm_model_block_1_block_1_fc2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias"));
            vision_model.mm_model_block_1_block_2_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight"));
            vision_model.mm_model_block_1_block_2_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight"));
            vision_model.mm_model_block_1_block_2_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias"));
            vision_model.mm_model_block_2_block_0_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight"));
            vision_model.mm_model_block_2_block_0_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight"));
            vision_model.mm_model_block_2_block_0_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias"));
            vision_model.mm_model_block_2_block_1_fc1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight"));
            vision_model.mm_model_block_2_block_1_fc1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias"));
            vision_model.mm_model_block_2_block_1_fc2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight"));
            vision_model.mm_model_block_2_block_1_fc2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias"));
            vision_model.mm_model_block_2_block_2_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
            vision_model.mm_model_block_2_block_2_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
            vision_model.mm_model_block_2_block_2_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
        } else if (new_clip->proj_type == PROJECTOR_TYPE_LDPV2) {
            // MobileVLM_V2 projection
            vision_model.mm_model_mlp_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 0, "weight"));
            vision_model.mm_model_mlp_0_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 0, "bias"));
            vision_model.mm_model_mlp_2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 2, "weight"));
            vision_model.mm_model_mlp_2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 2, "bias"));
            vision_model.mm_model_peg_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_PEG, 0, "weight"));
            vision_model.mm_model_peg_0_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_PEG, 0, "bias"));
        } else if (new_clip->proj_type == PROJECTOR_TYPE_RESAMPLER) {
            // vision_model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD);
            vision_model.mm_model_pos_embed_k = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD_K);
            vision_model.mm_model_query = get_tensor(new_clip->ctx_data, TN_MINICPMV_QUERY);
            vision_model.mm_model_proj = get_tensor(new_clip->ctx_data, TN_MINICPMV_PROJ);
            vision_model.mm_model_kv_proj = get_tensor(new_clip->ctx_data, TN_MINICPMV_KV_PROJ);
            vision_model.mm_model_attn_q_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "q", "weight"));
            vision_model.mm_model_attn_k_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "k", "weight"));
            vision_model.mm_model_attn_v_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "v", "weight"));
            vision_model.mm_model_attn_q_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "q", "bias"));
            vision_model.mm_model_attn_k_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "k", "bias"));
            vision_model.mm_model_attn_v_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "v", "bias"));
            vision_model.mm_model_attn_o_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "out", "weight"));
            vision_model.mm_model_attn_o_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "out", "bias"));
            vision_model.mm_model_ln_q_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "q", "weight"));
            vision_model.mm_model_ln_q_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "q", "bias"));
            vision_model.mm_model_ln_kv_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "kv", "weight"));
            vision_model.mm_model_ln_kv_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "kv", "bias"));
            vision_model.mm_model_ln_post_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "weight"));
            vision_model.mm_model_ln_post_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "bias"));
        } else {
            std::string proj_type = PROJECTOR_TYPE_NAMES[new_clip->proj_type];
            throw std::runtime_error(format("%s: projector type is currently not supported: %s\n", __func__, proj_type.c_str()));
        }

        vision_model.layers.resize(hparams.n_layer);

        for (int il = 0; il < hparams.n_layer; ++il) {
            auto & layer = vision_model.layers[il];
            layer.k_w    = get_tensor(new_clip->ctx_data, format(TN_ATTN_K, "v", il, "weight"));
            layer.q_w    = get_tensor(new_clip->ctx_data, format(TN_ATTN_Q, "v", il, "weight"));
            layer.v_w    = get_tensor(new_clip->ctx_data, format(TN_ATTN_V, "v", il, "weight"));
            layer.o_w    = get_tensor(new_clip->ctx_data, format(TN_ATTN_OUTPUT, "v", il, "weight"));
            layer.ln_1_w = get_tensor(new_clip->ctx_data, format(TN_LN_1, "v", il, "weight"));
            layer.ln_2_w = get_tensor(new_clip->ctx_data, format(TN_LN_2, "v", il, "weight"));
            layer.ff_i_w = get_tensor(new_clip->ctx_data, format(TN_FFN_DOWN, "v", il, "weight"));
            layer.ff_o_w = get_tensor(new_clip->ctx_data, format(TN_FFN_UP, "v", il, "weight"));
            layer.k_b    = get_tensor(new_clip->ctx_data, format(TN_ATTN_K, "v", il, "bias"));
            layer.q_b    = get_tensor(new_clip->ctx_data, format(TN_ATTN_Q, "v", il, "bias"));
            layer.v_b    = get_tensor(new_clip->ctx_data, format(TN_ATTN_V, "v", il, "bias"));
            layer.o_b    = get_tensor(new_clip->ctx_data, format(TN_ATTN_OUTPUT, "v", il, "bias"));
            layer.ln_1_b = get_tensor(new_clip->ctx_data, format(TN_LN_1, "v", il, "bias"));
            layer.ln_2_b = get_tensor(new_clip->ctx_data, format(TN_LN_2, "v", il, "bias"));
            layer.ff_i_b = get_tensor(new_clip->ctx_data, format(TN_FFN_DOWN, "v", il, "bias"));
            layer.ff_o_b = get_tensor(new_clip->ctx_data, format(TN_FFN_UP, "v", il, "bias"));
        }
    }

    ggml_free(meta);

    new_clip->ctx_gguf = ctx;

    // measure mem requirement and allocate
    {
        new_clip->buf_compute_meta.resize(GGML_DEFAULT_GRAPH_SIZE * ggml_tensor_overhead() + ggml_graph_overhead());
        new_clip->compute_alloc = ggml_gallocr_new(ggml_backend_get_default_buffer_type(new_clip->backend));
        clip_image_f32_batch batch;
        batch.size = 1;
        ggml_cgraph * gf = clip_image_build_graph(new_clip, &batch, nullptr, false);
        ggml_gallocr_reserve(new_clip->compute_alloc, gf);
        size_t compute_memory_buffer_size = ggml_gallocr_get_buffer_size(new_clip->compute_alloc, 0);
        LOG_INF("%s: compute allocated memory: %.2f MB\n", __func__, compute_memory_buffer_size / 1024.0 / 1024.0);
    }

    return new_clip;
}
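
// Usage sketch for the loader above (hypothetical caller; "mmproj.gguf" is a
// placeholder path and error handling is elided; note the loader can also throw):
//
//     clip_ctx * ctx = clip_model_load("mmproj.gguf", /*verbosity =*/ 1);
//     if (ctx) {
//         // ... preprocess and encode images, see clip_image_preprocess() and
//         // clip_image_encode() below ...
//         clip_free(ctx);
//     }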

void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size * load_image_size) {
    ctx_clip->load_image_size = load_image_size;
}

struct clip_image_size * clip_image_size_init() {
    struct clip_image_size * load_image_size = new struct clip_image_size();
    load_image_size->width = 448;
    load_image_size->height = 448;
    return load_image_size;
}

struct clip_image_u8 * clip_image_u8_init() {
    return new clip_image_u8();
}

struct clip_image_f32 * clip_image_f32_init() {
    return new clip_image_f32();
}

void clip_image_u8_free(struct clip_image_u8 * img) { delete img; }
void clip_image_f32_free(struct clip_image_f32 * img) { delete img; }

void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) {
    if (batch->size > 0) {
        delete[] batch->data;
        batch->size = 0;
    }
}

void clip_image_f32_batch_free(struct clip_image_f32_batch * batch) {
    if (batch->size > 0) {
        delete[] batch->data;
        batch->size = 0;
    }
}

static void build_clip_img_from_data(const stbi_uc * data, int nx, int ny, clip_image_u8 * img) {
    img->nx = nx;
    img->ny = ny;
    img->buf.resize(3 * nx * ny);
    memcpy(img->buf.data(), data, img->buf.size());
}

bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) {
    int nx, ny, nc;
    auto * data = stbi_load(fname, &nx, &ny, &nc, 3);
    if (!data) {
        LOG_ERR("%s: failed to load image '%s'\n", __func__, fname);
        return false;
    }
    build_clip_img_from_data(data, nx, ny, img);
    stbi_image_free(data);
    return true;
}
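
// Example (sketch; "input.png" is a placeholder path):
//
//     clip_image_u8 * img = clip_image_u8_init();
//     if (clip_image_load_from_file("input.png", img)) {
//         // ... use img ...
//     }
//     clip_image_u8_free(img);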

bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img) {
    int nx, ny, nc;
    auto * data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3);
    if (!data) {
        LOG_ERR("%s: failed to decode image bytes\n", __func__);
        return false;
    }
    build_clip_img_from_data(data, nx, ny, img);
    stbi_image_free(data);
    return true;
}

// Linear interpolation between two points
inline float clip_lerp(float s, float e, float t) {
    return s + (e - s) * t;
}
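// e.g. clip_lerp(10.0f, 20.0f, 0.25f) == 12.5f; t == 0 returns s, t == 1 returns e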

// Bilinear resize function
static void bilinear_resize(const clip_image_u8 & src, clip_image_u8 & dst, int target_width, int target_height) {
    dst.nx = target_width;
    dst.ny = target_height;
    dst.buf.resize(3 * target_width * target_height);

    float x_ratio = static_cast<float>(src.nx - 1) / target_width;
    float y_ratio = static_cast<float>(src.ny - 1) / target_height;

    for (int y = 0; y < target_height; y++) {
        for (int x = 0; x < target_width; x++) {
            float px = x_ratio * x;
            float py = y_ratio * y;
            int x_floor = static_cast<int>(px);
            int y_floor = static_cast<int>(py);
            float x_lerp = px - x_floor;
            float y_lerp = py - y_floor;

            for (int c = 0; c < 3; c++) {
                float top = clip_lerp(
                    static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
                    static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
                    x_lerp
                );
                float bottom = clip_lerp(
                    static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
                    static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
                    x_lerp
                );
                dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(clip_lerp(top, bottom, y_lerp));
            }
        }
    }
}

// Normalize image to float32 - careful with pytorch .to(model.device, dtype=torch.float16) - this sometimes reduces precision (a fp32 -> fp16 -> fp32 round trip), sometimes not
static void normalize_image_u8_to_f32(const clip_image_u8 * src, clip_image_f32 * dst, const float mean[3], const float std[3]) {
    dst->nx = src->nx;
    dst->ny = src->ny;
    dst->buf.resize(src->buf.size());

    for (size_t i = 0; i < src->buf.size(); ++i) {
        int c = i % 3; // rgb
        dst->buf[i] = (static_cast<float>(src->buf[i]) / 255.0f - mean[c]) / std[c];
    }
}
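// Example: with the OpenAI CLIP statistics referenced later in this file
// (red-channel mean 0.48145466, std 0.26862954), a pure-white pixel (255)
// maps to (1.0 - 0.48145466) / 0.26862954 ≈ 1.93.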

inline int clip(int x, int lower, int upper) {
    return std::max(lower, std::min(x, upper));
}

static bool bicubic_resize(const clip_image_u8 & img, clip_image_u8 & dst, int target_width, int target_height) {
    const int nx = img.nx;
    const int ny = img.ny;

    dst.nx = target_width;
    dst.ny = target_height;
    dst.buf.resize(3 * target_width * target_height);

    float Cc;
    float C[5];
    float d0, d2, d3, a0, a1, a2, a3;
    int i, j, k, jj;
    int x, y;
    float dx, dy;
    float tx, ty;

    tx = (float)nx / (float)target_width;
    ty = (float)ny / (float)target_height;

    // Bicubic interpolation; adapted from ViT.cpp, inspired by:
    // -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36
    // -> https://en.wikipedia.org/wiki/Bicubic_interpolation
    for (i = 0; i < target_height; i++) {
        for (j = 0; j < target_width; j++) {
            x = (int)(tx * j);
            y = (int)(ty * i);

            dx = tx * j - x;
            dy = ty * i - y;

            for (k = 0; k < 3; k++) {
                // interpolate horizontally across the four neighboring rows
                for (jj = 0; jj <= 3; jj++) {
                    d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                    d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                    d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                    a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];

                    a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                    a2 =  1.0 / 2 * d0 + 1.0 / 2 * d2;
                    a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;

                    C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx;
                }

                // interpolate vertically; this needs all of C[0..3], so it must run
                // after the jj loop has filled them (running it per-jj would read
                // uninitialized values, and only the final iteration would be valid)
                d0 = C[0] - C[1];
                d2 = C[2] - C[1];
                d3 = C[3] - C[1];

                a0 = C[1];
                a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                a2 =  1.0 / 2 * d0 + 1.0 / 2 * d2;
                a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;

                Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy;

                const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f);
                dst.buf[(i * target_width + j) * 3 + k] = Cc2;
            }
        }
    }

    return true;
}

// llava-1.6 type of resize_and_pad (black)
static void resize_and_pad_image(const clip_image_u8 & image, clip_image_u8 & image_output, const std::pair<int, int> & target_resolution) {
    int target_width = target_resolution.first;
    int target_height = target_resolution.second;

    float scale_w = static_cast<float>(target_width) / image.nx;
    float scale_h = static_cast<float>(target_height) / image.ny;

    int new_width, new_height;

    if (scale_w < scale_h) {
        new_width = target_width;
        new_height = std::min(static_cast<int>(std::ceil(image.ny * scale_w)), target_height);
    } else {
        new_height = target_height;
        new_width = std::min(static_cast<int>(std::ceil(image.nx * scale_h)), target_width);
    }

    clip_image_u8 resized_image;
    // bilinear_resize(image, resized_image, new_width, new_height);
    bicubic_resize(image, resized_image, new_width, new_height);

    clip_image_u8 padded_image;
    padded_image.nx = target_width;
    padded_image.ny = target_height;
    padded_image.buf.resize(3 * target_width * target_height, 0); // Initialize with black

    // Calculate padding offsets
    int pad_x = (target_width - new_width) / 2;
    int pad_y = (target_height - new_height) / 2;

    // Copy the resized image into the center of the padded buffer
    for (int y = 0; y < new_height; ++y) {
        for (int x = 0; x < new_width; ++x) {
            for (int c = 0; c < 3; ++c) {
                padded_image.buf[3 * ((y + pad_y) * target_width + (x + pad_x)) + c] = resized_image.buf[3 * (y * new_width + x) + c];
            }
        }
    }
    image_output = std::move(padded_image);
}

/**
 * Selects the best resolution from a list of possible resolutions based on the original size.
 *
 * @param original_size The original size of the image in the format (width, height).
 * @param possible_resolutions A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].
 * @return The best fit resolution in the format (width, height).
 */
static std::pair<int, int> select_best_resolution(const std::pair<int, int> & original_size, const std::vector<std::pair<int, int>> & possible_resolutions) {
    int original_width = original_size.first;
    int original_height = original_size.second;
    std::pair<int, int> best_fit;
    int max_effective_resolution = 0;
    int min_wasted_resolution = std::numeric_limits<int>::max();

    for (const auto & resolution : possible_resolutions) {
        int width = resolution.first;
        int height = resolution.second;
        float scale = std::min(static_cast<float>(width) / original_width, static_cast<float>(height) / original_height);
        int downscaled_width  = static_cast<int>(original_width * scale);
        int downscaled_height = static_cast<int>(original_height * scale);
        int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height);
        int wasted_resolution = (width * height) - effective_resolution;
        // LOG_INF("resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution);
        if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) {
            max_effective_resolution = effective_resolution;
            min_wasted_resolution = wasted_resolution;
            best_fit = resolution;
        }
    }

    return best_fit;
}
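
// Worked example (assumed inputs): for a 512x512 original and candidates
// {672x672, 336x1344}, 672x672 keeps all 512*512 = 262144 effective pixels and
// wastes 672*672 - 262144 = 189440, while 336x1344 only keeps 336*336 = 112896
// after scaling by 0.65625, so 672x672 is selected.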

static std::vector<clip_image_u8 *> divide_to_patches_u8(const clip_image_u8 & image, int patch_size) {
    std::vector<clip_image_u8 *> patches;
    int width = image.nx;
    int height = image.ny;
    for (int i = 0; i < height; i += patch_size) {
        for (int j = 0; j < width; j += patch_size) {
            clip_image_u8 * patch = clip_image_u8_init();
            patch->nx = std::min(patch_size, width - j);
            patch->ny = std::min(patch_size, height - i);
            patch->buf.resize(3 * patch->nx * patch->ny);
            for (int y = 0; y < patch->ny; ++y) {
                for (int x = 0; x < patch->nx; ++x) {
                    for (int c = 0; c < 3; ++c) {
                        patch->buf[3 * (y * patch->nx + x) + c] = image.buf[3 * ((i + y) * width + (j + x)) + c];
                    }
                }
            }
            patches.push_back(patch);
        }
    }
    return patches;
}

static int ensure_divide(int length, int patch_size) {
    return std::max(static_cast<int>(std::round(static_cast<float>(length) / patch_size) * patch_size), patch_size);
}
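
// e.g. ensure_divide(100, 14) == 98: 100/14 ≈ 7.14 rounds to 7, and 7 * 14 = 98;
// the result is never smaller than one patch_size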

static std::pair<int, int> uhd_find_best_resize(std::pair<int, int> original_size, int scale_resolution, int patch_size, bool allow_upscale = false) {
    int width = original_size.first;
    int height = original_size.second;
    if ((width * height > scale_resolution * scale_resolution) || allow_upscale) {
        float r = static_cast<float>(width) / height;
        height = static_cast<int>(scale_resolution / std::sqrt(r));
        width = static_cast<int>(height * r);
    }
    int best_width = ensure_divide(width, patch_size);
    int best_height = ensure_divide(height, patch_size);
    return std::make_pair(best_width, best_height);
}

static std::pair<int, int> uhd_get_refine_size(std::pair<int, int> original_size, std::pair<int, int> grid, int scale_resolution, int patch_size, bool allow_upscale = false) {
    int width, height;
    std::tie(width, height) = original_size;
    int grid_x, grid_y;
    std::tie(grid_x, grid_y) = grid;

    int refine_width = ensure_divide(width, grid_x);
    int refine_height = ensure_divide(height, grid_y);

    int grid_width = refine_width / grid_x;
    int grid_height = refine_height / grid_y;

    auto best_grid_size = uhd_find_best_resize(std::make_pair(grid_width, grid_height), scale_resolution, patch_size, allow_upscale);
    int best_grid_width, best_grid_height;
    std::tie(best_grid_width, best_grid_height) = best_grid_size;

    std::pair<int, int> refine_size = std::make_pair(best_grid_width * grid_x, best_grid_height * grid_y);
    return refine_size;
}

static std::pair<int, int> uhd_best_grid(const int max_slice_nums, const int multiple, const float log_ratio) {
    std::vector<int> candidate_split_grids_nums;
    for (int i : {multiple - 1, multiple, multiple + 1}) {
        if (i == 1 || i > max_slice_nums) {
            continue;
        }
        candidate_split_grids_nums.push_back(i);
    }

    std::vector<std::pair<int, int>> candidate_grids;
    for (int split_grids_nums : candidate_split_grids_nums) {
        int m = 1;
        while (m <= split_grids_nums) {
            if (split_grids_nums % m == 0) {
                candidate_grids.emplace_back(m, split_grids_nums / m);
            }
            ++m;
        }
    }

    std::pair<int, int> best_grid{1, 1};
    float min_error = std::numeric_limits<float>::infinity();
    for (const auto & grid : candidate_grids) {
        float error = std::abs(log_ratio - std::log(1.0 * grid.first / grid.second));
        if (error < min_error) {
            best_grid = grid;
            min_error = error;
        }
    }
    return best_grid;
}
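
// e.g. for a 4:3 image (log_ratio = ln(4/3) ≈ 0.288) with multiple == 4 and
// max_slice_nums == 9, the candidates are (1,3),(3,1),(1,4),(2,2),(4,1),(1,5),(5,1);
// (2,2) minimizes |log_ratio - ln(cols/rows)| and is returned.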

// inspired by LLaVA-UHD:
// -> https://arxiv.org/pdf/2403.11703
// -> https://github.com/thunlp/LLaVA-UHD
// -> https://github.com/thunlp/LLaVA-UHD/blob/302301bc2175f7e717fb8548516188e89f649753/llava_uhd/train/llava-uhd/slice_logic.py#L118
static std::vector<std::vector<clip_image_u8 *>> uhd_slice_image(const clip_image_u8 * img, const int max_slice_nums = 9, const int scale_resolution = 448, const int patch_size = 14) {
    const std::pair<int, int> original_size = {img->nx, img->ny};
    const int original_width  = img->nx;
    const int original_height = img->ny;
    const float log_ratio = log(1.0 * original_width / original_height);
    const float ratio = 1.0 * original_width * original_height / (scale_resolution * scale_resolution);
    const int multiple = fmin(ceil(ratio), max_slice_nums);

    std::vector<std::vector<clip_image_u8 *>> images;
    LOG_INF("%s: multiple %d\n", __func__, multiple);
    images.push_back(std::vector<clip_image_u8 *>());

    if (multiple <= 1) {
        auto best_size = uhd_find_best_resize(original_size, scale_resolution, patch_size, true);
        clip_image_u8 * source_image = clip_image_u8_init();
        bicubic_resize(*img, *source_image, best_size.first, best_size.second);
        // source_image = image.resize(best_size, Image.Resampling.BICUBIC)
        images[images.size() - 1].push_back(source_image);
    } else {
        auto best_size = uhd_find_best_resize(original_size, scale_resolution, patch_size);
        clip_image_u8 * source_image = clip_image_u8_init();
        bicubic_resize(*img, *source_image, best_size.first, best_size.second);
        // source_image = image.copy().resize(best_resize, Image.Resampling.BICUBIC)
        LOG_INF("%s: image_size: %d %d; source_image size: %d %d\n", __func__, img->nx, img->ny, best_size.first, best_size.second);
        images[images.size() - 1].push_back(source_image);

        std::pair<int, int> best_grid = uhd_best_grid(max_slice_nums, multiple, log_ratio);
        LOG_INF("%s: image_size: %d %d; best_grid: %d %d\n", __func__, img->nx, img->ny, best_grid.first, best_grid.second);

        auto refine_size = uhd_get_refine_size(original_size, best_grid, scale_resolution, patch_size, true);
        clip_image_u8 * refine_image = clip_image_u8_init();
        bicubic_resize(*img, *refine_image, refine_size.first, refine_size.second);
        LOG_INF("%s: refine_image_size: %d %d; refine_size: %d %d\n", __func__, refine_image->nx, refine_image->ny, refine_size.first, refine_size.second);

        // split_to_patches
        int width  = refine_image->nx;
        int height = refine_image->ny;
        int grid_x = int(width  / best_grid.first);
        int grid_y = int(height / best_grid.second);
        for (int patches_i = 0, ic = 0; patches_i < height && ic < best_grid.second; patches_i += grid_y, ic += 1) {
            images.push_back(std::vector<clip_image_u8 *>());
            for (int patches_j = 0, jc = 0; patches_j < width && jc < best_grid.first; patches_j += grid_x, jc += 1) {
                clip_image_u8 * patch = clip_image_u8_init();
                patch->nx = grid_x;
                patch->ny = grid_y;
                patch->buf.resize(3 * patch->nx * patch->ny);
                for (int y = patches_i; y < patches_i + grid_y; ++y) {
                    for (int x = patches_j; x < patches_j + grid_x; ++x) {
                        const int i = 3 * (y * refine_image->nx + x);
                        const int j = 3 * ((y - patches_i) * patch->nx + (x - patches_j));
                        patch->buf[j]     = refine_image->buf[i];
                        patch->buf[j + 1] = refine_image->buf[i + 1];
                        patch->buf[j + 2] = refine_image->buf[i + 2];
                    }
                }
                images[images.size() - 1].push_back(patch);
            }
        }
        clip_image_u8_free(refine_image); // refine_image was only an intermediate; free it to avoid leaking
    }
    return images;
}
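
// Layout of the returned vector: images[0] contains the single overview image;
// when the image is sliced, each following images[i] holds one row of the
// refined grid patches, left to right.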

int clip_uhd_num_image_embeds_col(struct clip_ctx * ctx_clip) {
    const int max_slice_nums = 9;
    const int scale_resolution = 448;
    const int original_width  = ctx_clip->load_image_size->width;
    const int original_height = ctx_clip->load_image_size->height;
    const float log_ratio = log(1.0 * original_width / original_height);
    const float ratio = 1.0 * original_width * original_height / (scale_resolution * scale_resolution);
    const int multiple = fmin(ceil(ratio), max_slice_nums);
    std::pair<int, int> best_grid = uhd_best_grid(max_slice_nums, multiple, log_ratio);
    return best_grid.first;
}

// Returns the normalized float tensor for llava-1.5; for spatial_unpad with anyres processing (llava-1.6) it returns the normalized image patch tensors as a vector.
// res_imgs memory is being allocated here, previous allocations will be freed if found
bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, clip_image_f32_batch * res_imgs) {
    if (clip_is_minicpmv(ctx)) {
        int max_slice_nums = 9;
        std::vector<std::vector<clip_image_u8 *>> imgs = uhd_slice_image(img, max_slice_nums);
        res_imgs->size = 0;
        for (size_t i = 0; i < imgs.size(); ++i) {
            res_imgs->size += imgs[i].size();
        }
        res_imgs->data = new clip_image_f32[res_imgs->size];
        int idx = 0;
        for (size_t i = 0; i < imgs.size(); ++i) {
            for (size_t j = 0; j < imgs[i].size(); ++j) {
                LOG_DBG("%s: %d %d\n", __func__, imgs[i][j]->nx, imgs[i][j]->ny);
                clip_image_f32 * res = clip_image_f32_init();
                normalize_image_u8_to_f32(imgs[i][j], res, ctx->image_mean, ctx->image_std);
                res_imgs->data[idx++] = *res;
                clip_image_f32_free(res);
                clip_image_u8_free(imgs[i][j]); // the u8 slice is no longer needed; free it to avoid leaking
            }
        }
        return true;
    }

    bool pad_to_square = true;
    if (!ctx->has_vision_encoder) {
        LOG_ERR("This gguf file seems to have no vision encoder\n");
        return false;
    }

    auto & params = ctx->vision_model.hparams;
    // The model config actually contains all we need to decide on how to preprocess, here we automatically switch to the new llava-1.6 preprocessing
    if (strcmp(params.mm_patch_merge_type, "spatial_unpad") == 0) {
        pad_to_square = false;
    }

    // free the previous res_imgs if any set
    if (res_imgs->size > 0) {
        clip_image_f32_batch_free(res_imgs);
    }
    res_imgs->data = nullptr;
    res_imgs->size = 0;

    // the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104)
    // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156

    clip_image_u8 * temp = clip_image_u8_init(); // we will keep the input image data here temporarily
    if (pad_to_square && img->nx != img->ny) {
        int longer_side = std::max(img->nx, img->ny);
        temp->nx = longer_side;
        temp->ny = longer_side;
        temp->buf.resize(3 * longer_side * longer_side);
        const uint8_t bc[3] = {122, 116, 104}; // background color in RGB from LLaVA (this is the mean rgb color * 255)

        // fill with background color
        for (size_t i = 0; i < temp->buf.size(); i++) {
            temp->buf[i] = bc[i % 3];
        }

        // copy from the input image
        for (int y = 0; y < img->ny; y++) {
            for (int x = 0; x < img->nx; x++) {
                const int i = 3 * (y * img->nx + x);
                const int j = 3 * (y * temp->nx + x);
                temp->buf[j]     = img->buf[i];
                temp->buf[j + 1] = img->buf[i + 1];
                temp->buf[j + 2] = img->buf[i + 2];
            }
        }
    } else {
        if (params.image_grid_pinpoints[0] != 0) {
            // "spatial_unpad" with "anyres" processing for llava-1.6
            std::vector<std::pair<int, int>> possible_resolutions;
            for (int i = 0; i < 32 && params.image_grid_pinpoints[i] != 0; i += 2) {
                possible_resolutions.push_back({params.image_grid_pinpoints[i], params.image_grid_pinpoints[i + 1]});
            }
            std::pair<int, int> best_resolution = select_best_resolution({img->nx, img->ny}, possible_resolutions);
            // clip_image_save_to_bmp(*img, "input.bmp");
            resize_and_pad_image(*img, *temp, best_resolution); // we do not pad with mean-bg color anymore in llava-1.6
            // clip_image_save_to_bmp(*temp, "resized.bmp");
            // visually verify normalized image:
            // normalize_image_u8_to_f32(*temp, *res, ctx->image_mean, ctx->image_std);
            // {
            //     clip_image_u8 * temp2 = clip_image_u8_init();
            //     clip_image_convert_f32_to_u8(*res, *temp2);
            //     clip_image_save_to_bmp(*temp2, "resized_normalized_f32.bmp");
            //     clip_image_u8_free(temp2);
            // }

            std::vector<clip_image_u8 *> patches = divide_to_patches_u8(*temp, params.image_size); // prepare spatial sorted main patches of image_size each (336 in llava-1.6)

            clip_image_u8 * image_original_resize = clip_image_u8_init();
            // bilinear_resize(*img, *image_original_resize, params.image_size, params.image_size); // in python this is "shortest_edge", but all CLIP are square
            bicubic_resize(*img, *image_original_resize, params.image_size, params.image_size); // in python this is "shortest_edge", but all CLIP are square
            patches.insert(patches.begin(), image_original_resize);

            // clip_image_f32_batch_init(patches.size());
            res_imgs->size = patches.size();
            res_imgs->data = new clip_image_f32[res_imgs->size];
            int num = 0;
            for (auto & patch : patches) {
                normalize_image_u8_to_f32(patch, &res_imgs->data[num], ctx->image_mean, ctx->image_std);
                num++;
            }

            for (size_t i = 0; i < patches.size(); i++) {
                // LOG_DBG("patch %d: %d %d\n", i, patches[i]->nx, patches[i]->ny);
                clip_image_u8_free(patches[i]);
            }

            clip_image_u8_free(temp);

            return true;
        } else {
            temp->nx = img->nx;
            temp->ny = img->ny;
            temp->buf.resize(img->buf.size());
            memcpy(temp->buf.data(), img->buf.data(), temp->buf.size());
        }
    }

    const int nx = temp->nx;
    const int ny = temp->ny;
    // clip_image_save_to_bmp(*temp, "resized_vanilla.bmp");

    const int nx2 = ctx->vision_model.hparams.image_size;
    const int ny2 = ctx->vision_model.hparams.image_size;
    clip_image_f32 * res = clip_image_f32_init();
    res->nx = nx2;
    res->ny = ny2;
    res->buf.resize(3 * nx2 * ny2);

    const float scale = std::max(nx, ny) / (float)ctx->vision_model.hparams.image_size;

    const int nx3 = int(nx / scale + 0.5f);
    const int ny3 = int(ny / scale + 0.5f);

    const auto & m3 = ctx->image_mean; // {0.48145466f, 0.4578275f, 0.40821073f};
    const auto & s3 = ctx->image_std;  // {0.26862954f, 0.26130258f, 0.27577711f};

    for (int y = 0; y < ny3; y++) {
        for (int x = 0; x < nx3; x++) {
            for (int c = 0; c < 3; c++) {
                // linear interpolation
                const float sx = (x + 0.5f) * scale - 0.5f;
                const float sy = (y + 0.5f) * scale - 0.5f;

                const int x0 = std::max(0, (int)std::floor(sx));
                const int y0 = std::max(0, (int)std::floor(sy));

                const int x1 = std::min(x0 + 1, nx - 1);
                const int y1 = std::min(y0 + 1, ny - 1);

                const float dx = sx - x0;
                const float dy = sy - y0;

                const int j00 = 3 * (y0 * nx + x0) + c;
                const int j01 = 3 * (y0 * nx + x1) + c;
                const int j10 = 3 * (y1 * nx + x0) + c;
                const int j11 = 3 * (y1 * nx + x1) + c;

                const float v00 = temp->buf[j00];
                const float v01 = temp->buf[j01];
                const float v10 = temp->buf[j10];
                const float v11 = temp->buf[j11];

                const float v0 = v00 * (1.0f - dx) + v01 * dx;
                const float v1 = v10 * (1.0f - dx) + v11 * dx;

                const float v = v0 * (1.0f - dy) + v1 * dy;

                const uint8_t v2 = std::min(std::max(std::round(v), 0.0f), 255.0f);

                const int i = 3 * (y * nx3 + x) + c;

                res->buf[i] = ((float(v2) / 255.0f) - m3[c]) / s3[c];
            }
        }
    }
    clip_image_u8_free(temp);

    // {
    //     clip_image_u8 * temp2 = clip_image_u8_init();
    //     clip_image_convert_f32_to_u8(*res, *temp2);
    //     clip_image_save_to_bmp(*temp2, "resized_normalized_f32_vanilla.bmp");
    //     clip_image_u8_free(temp2);
    // }
    // res_imgs.push_back(res);

    res_imgs->size = 1;
    res_imgs->data = new clip_image_f32[res_imgs->size];
    res_imgs->data[0] = *res;
    clip_image_f32_free(res);

    return true;
}
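
// Usage sketch (hypothetical caller; assumes a loaded ctx and a populated img):
//
//     clip_image_f32_batch batch{};
//     if (clip_image_preprocess(ctx, img, &batch)) {
//         // batch.size == 1 for plain llava-1.5; anyres (llava-1.6) and
//         // MiniCPM-V slicing produce one overview image plus grid patches
//         clip_image_f32_batch_free(&batch);
//     }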

ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) {
    return ctx->vision_model.image_newline;
}

void clip_free(clip_ctx * ctx) {
    ggml_free(ctx->ctx_data);
    gguf_free(ctx->ctx_gguf);

    ggml_backend_buffer_free(ctx->params_buffer);
    ggml_backend_free(ctx->backend);
    ggml_gallocr_free(ctx->compute_alloc);
    delete ctx;
}

size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
    return clip_n_patches(ctx) * clip_n_mmproj_embd(ctx) * sizeof(float);
}

int32_t clip_image_size(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.image_size;
}

int32_t clip_patch_size(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.patch_size;
}

int32_t clip_hidden_size(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.hidden_size;
}

const char * clip_patch_merge_type(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.mm_patch_merge_type;
}

const int32_t * clip_image_grid(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.image_grid_pinpoints;
}

int clip_n_patches(const struct clip_ctx * ctx) {
    const auto & params = ctx->vision_model.hparams;

    int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);

    if (ctx->proj_type == PROJECTOR_TYPE_LDP || ctx->proj_type == PROJECTOR_TYPE_LDPV2) {
        n_patches /= 4;
    } else if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) {
        if (ctx->minicpmv_version == 2) {
            n_patches = 96;
        } else if (ctx->minicpmv_version == 3) {
            n_patches = 64;
        }
    }

    return n_patches;
}
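
// e.g. with image_size == 336 and patch_size == 14: (336/14)^2 = 576 patches for
// the MLP projectors, 576/4 = 144 for LDP/LDPV2, and a fixed query count
// (96 or 64) for the MiniCPM-V resampler.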

static std::vector<std::vector<std::vector<float>>> get_1d_sincos_pos_embed_from_grid_new(int embed_dim, const std::vector<std::vector<float>> & pos) {
    assert(embed_dim % 2 == 0);
    int H = pos.size();
    int W = pos[0].size();

    std::vector<float> omega(embed_dim / 2);
    for (int i = 0; i < embed_dim / 2; ++i) {
        omega[i] = 1.0 / pow(10000.0, static_cast<float>(i) / (embed_dim / 2));
    }

    std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));
    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            for (int d = 0; d < embed_dim / 2; ++d) {
                float out_value = pos[h][w] * omega[d];
                emb[h][w][d] = sin(out_value);
                emb[h][w][d + embed_dim / 2] = cos(out_value);
            }
        }
    }

    return emb;
}

static std::vector<std::vector<std::vector<float>>> get_2d_sincos_pos_embed_from_grid(int embed_dim, const std::vector<std::vector<std::vector<float>>> & grid) {
    assert(embed_dim % 2 == 0);
    std::vector<std::vector<std::vector<float>>> emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[0]); // (H, W, D/2)
    std::vector<std::vector<std::vector<float>>> emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[1]); // (H, W, D/2)

    int H = emb_h.size();
    int W = emb_h[0].size();
    std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));

    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            for (int d = 0; d < embed_dim / 2; ++d) {
                emb[h][w][d] = emb_h[h][w][d];
                emb[h][w][d + embed_dim / 2] = emb_w[h][w][d];
            }
        }
    }
    return emb;
}

static std::vector<std::vector<float>> get_2d_sincos_pos_embed(int embed_dim, const std::pair<int, int> image_size) {
    int grid_h_size = image_size.first;
    int grid_w_size = image_size.second;

    std::vector<float> grid_h(grid_h_size);
    std::vector<float> grid_w(grid_w_size);

    for (int i = 0; i < grid_h_size; ++i) {
        grid_h[i] = static_cast<float>(i);
    }
    for (int i = 0; i < grid_w_size; ++i) {
        grid_w[i] = static_cast<float>(i);
    }

    std::vector<std::vector<float>> grid(grid_h_size, std::vector<float>(grid_w_size));
    for (int h = 0; h < grid_h_size; ++h) {
        for (int w = 0; w < grid_w_size; ++w) {
            grid[h][w] = grid_w[w];
        }
    }
    std::vector<std::vector<std::vector<float>>> grid_2d = {grid, grid};
    for (int h = 0; h < grid_h_size; ++h) {
        for (int w = 0; w < grid_w_size; ++w) {
            grid_2d[0][h][w] = grid_h[h];
            grid_2d[1][h][w] = grid_w[w];
        }
    }

    std::vector<std::vector<std::vector<float>>> pos_embed_3d = get_2d_sincos_pos_embed_from_grid(embed_dim, grid_2d);

    int H = image_size.first;
    int W = image_size.second;
    std::vector<std::vector<float>> pos_embed_2d(H * W, std::vector<float>(embed_dim));
    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            pos_embed_2d[w * H + h] = pos_embed_3d[h][w];
        }
    }

    return pos_embed_2d;
}
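
// This is the standard 2D sin-cos scheme: the embed_dim dims are split evenly
// between the h and w coordinates, and within each half, frequency index d uses
// omega_d = 1 / 10000^(d / (embed_dim/4)), emitting a sin/cos pair per position.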

bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
    if (!ctx->has_vision_encoder) {
        LOG_ERR("This gguf file seems to have no vision encoder\n");
        return false;
    }

    clip_image_f32_batch imgs{};
    imgs.size = 1;
    imgs.data = img;
    return clip_image_batch_encode(ctx, n_threads, &imgs, vec);
}
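
// Usage sketch (hypothetical caller; assumes a loaded ctx and a preprocessed
// image, e.g. &batch.data[0] from clip_image_preprocess above):
//
//     std::vector<float> emb(clip_n_patches(ctx) * clip_n_mmproj_embd(ctx));
//     if (!clip_image_encode(ctx, /*n_threads =*/ 4, &batch.data[0], emb.data())) {
//         // handle error
//     }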

bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs, float * vec) {
    if (!ctx->has_vision_encoder) {
        LOG_ERR("This gguf file seems to have no vision encoder\n");
        return false;
    }

    int batch_size = imgs->size;
    if (ctx->has_llava_projector) {
        GGML_ASSERT(batch_size == 1); // TODO: support multiple images
    }
    if (ctx->has_minicpmv_projector) {
        GGML_ASSERT(batch_size == 1);
    }

    // build the inference graph
    ggml_cgraph * gf = clip_image_build_graph(ctx, imgs, ctx->load_image_size, true);
    ggml_gallocr_alloc_graph(ctx->compute_alloc, gf);

    // set inputs
    const auto & model = ctx->vision_model;
    const auto & hparams = model.hparams;

    const int image_size = hparams.image_size;
    int image_size_width  = image_size;
    int image_size_height = image_size;
    if (ctx->has_minicpmv_projector) {
        image_size_width  = imgs->data[0].nx;
        image_size_height = imgs->data[0].ny;
    }
    const int patch_size    = hparams.patch_size;
    const int num_patches   = ((image_size_width / patch_size) * (image_size_height / patch_size));
    const int num_positions = num_patches + (ctx->has_class_embedding ? 1 : 0);
    if (ctx->load_image_size == nullptr) {
        ctx->load_image_size = clip_image_size_init();
    }
    const int pos_w = ctx->load_image_size->width  / patch_size;
    const int pos_h = ctx->load_image_size->height / patch_size;

    {
        struct ggml_tensor * inp_raw = ggml_graph_get_tensor(gf, "inp_raw");
        float * data = (float *)malloc(ggml_nbytes(inp_raw));

        for (size_t i = 0; i < imgs->size; i++) {
            const int nx = imgs->data[i].nx;
            const int ny = imgs->data[i].ny;
            if (!ctx->has_minicpmv_projector) {
                GGML_ASSERT(nx == image_size && ny == image_size);
            }

            const int n = nx * ny;

            for (int b = 0; b < batch_size; b++) {
                for (int k = 0; k < 3; k++) {
                    for (int y = 0; y < ny; y++) {
                        for (int x = 0; x < nx; x++) {
                            data[(b * 3 * n) + k * n + y * nx + x] = imgs->data[b].buf[3 * (y * nx + x) + k];
                        }
                    }
                }
            }
        }
        ggml_backend_tensor_set(inp_raw, data, 0, ggml_nbytes(inp_raw));
        free(data);
    }
  2096. if (ctx->has_minicpmv_projector) {
  2097. {
  2098. // inspired from siglip:
  2099. // -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
  2100. // -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit/blob/d66538faeba44480d0bfaa42145eef26f9423199/modeling_siglip.py#L316
            struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions");
            int * positions_data = (int *)malloc(ggml_nbytes(positions));

            int bucket_coords_h[70];
            int bucket_coords_w[70];
            GGML_ASSERT(pos_h <= 70 && pos_w <= 70); // the bucket arrays hold at most 70 coordinates per axis
            for (int i = 0; i < pos_h; i++) {
                bucket_coords_h[i] = std::floor(70.0 * i / pos_h);
            }
            for (int i = 0; i < pos_w; i++) {
                bucket_coords_w[i] = std::floor(70.0 * i / pos_w);
            }

            for (int i = 0, id = 0; i < pos_h; i++) {
                for (int j = 0; j < pos_w; j++) {
                    positions_data[id++] = bucket_coords_h[i] * 70 + bucket_coords_w[j];
                }
            }

            ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions));
            free(positions_data);
        }
        {
            // inspired from resampler of Qwen-VL:
            //    -> https://huggingface.co/Qwen/Qwen-VL/tree/main
            //    -> https://huggingface.co/Qwen/Qwen-VL/blob/0547ed36a86561e2e42fecec8fd0c4f6953e33c4/visual.py#L23
            struct ggml_tensor * pos_embed = ggml_graph_get_tensor(gf, "pos_embed");

            int embed_dim = 4096;
            if (ctx->minicpmv_version == 2) {
                embed_dim = 4096;
            }
            else if (ctx->minicpmv_version == 3) {
                embed_dim = 3584;
            }
            auto pos_embed_t = get_2d_sincos_pos_embed(embed_dim, std::make_pair(pos_w, pos_h));

            float * pos_embed_data = (float *)malloc(ggml_nbytes(pos_embed));
            for (int i = 0; i < pos_w * pos_h; ++i) {
                for (int j = 0; j < embed_dim; ++j) {
                    pos_embed_data[i * embed_dim + j] = pos_embed_t[i][j];
                }
            }

            ggml_backend_tensor_set(pos_embed, pos_embed_data, 0, ggml_nbytes(pos_embed));
            free(pos_embed_data);
        }
    }
    else {
        {
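            // zero-init the embeddings input: the graph accumulates the class
            // embedding and the patch embeddings into this tensor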
            if (ctx->has_class_embedding) {
                struct ggml_tensor * embeddings = ggml_graph_get_tensor(gf, "embeddings");

                void * zero_mem = malloc(ggml_nbytes(embeddings));
                memset(zero_mem, 0, ggml_nbytes(embeddings));
                ggml_backend_tensor_set(embeddings, zero_mem, 0, ggml_nbytes(embeddings));
                free(zero_mem);
            }
        }
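
        // sequential position ids: 0 .. num_positions - 1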
        {
            struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions");

            int * positions_data = (int *)malloc(ggml_nbytes(positions));
            for (int i = 0; i < num_positions; i++) {
                positions_data[i] = i;
            }
            ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions));
            free(positions_data);
        }
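
        // patch row indices for the projector, offset by one to skip the class token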
        {
            struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches");

            int * patches_data = (int *)malloc(ggml_nbytes(patches));
            for (int i = 0; i < num_patches; i++) {
                patches_data[i] = i + 1;
            }
            ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches));
            free(patches_data);
        }
    }
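
    // run the graph on the selected backend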
    if (ggml_backend_is_cpu(ctx->backend)) {
        ggml_backend_cpu_set_n_threads(ctx->backend, n_threads);
    }

    ggml_backend_graph_compute(ctx->backend, gf);

    // the last node is the embedding tensor
    struct ggml_tensor * embeddings = ggml_graph_node(gf, -1);

    // copy the embeddings to the location passed by the user
    ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));

    return true;
}
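
// quantize the 2D weight tensors of a model file to the given ggml type and
// write the result as a new gguf file
// usage sketch (the file names are illustrative placeholders):
//
//   clip_model_quantize("mmproj-f16.gguf", "mmproj-q4_1.gguf", GGML_TYPE_Q4_1);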
bool clip_model_quantize(const char * fname_inp, const char * fname_out, const int itype) {
    assert(itype < GGML_TYPE_COUNT);
    ggml_type type = static_cast<ggml_type>(itype);

    auto * ctx_clip = clip_model_load(fname_inp, 2);

    const auto & ctx_src  = ctx_clip->ctx_gguf;
    const auto & ctx_data = ctx_clip->ctx_data;

    auto * ctx_out = gguf_init_empty();
    gguf_set_kv(ctx_out, ctx_src);
    gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
    gguf_set_val_u32(ctx_out, "general.file_type", itype);

    auto fout = std::ofstream(fname_out, std::ios::binary);

    const int n_tensors = gguf_get_n_tensors(ctx_src);

    for (int i = 0; i < n_tensors; ++i) {
        const char * name = gguf_get_tensor_name(ctx_src, i);
        struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name);
        gguf_add_tensor(ctx_out, cur);
    }
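
    // write a zero-filled placeholder for the metadata; it is rewritten at the
    // end, once the final tensor sizes are known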
    const size_t meta_size = gguf_get_meta_size(ctx_out);
    for (size_t i = 0; i < meta_size; ++i) {
        fout.put(0);
    }

    // regexes of tensor names to be quantized
    const std::vector<std::string> k_names = {
        ".*weight",
    };

    std::vector<uint8_t> work(512);
    std::vector<float> conv_buf(512);
    size_t total_size_org = 0;
    size_t total_size_new = 0;
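
    // quantize each matching 2D tensor; everything else is copied through unchanged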
    for (int i = 0; i < n_tensors; ++i) {
        const std::string name = gguf_get_tensor_name(ctx_src, i);
        struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name.c_str());

        enum ggml_type new_type;
        void * new_data;
        size_t new_size;

        bool quantize = false;
        for (const auto & s : k_names) {
            if (std::regex_match(name, std::regex(s))) {
                quantize = true;
                break;
            }
        }

        // quantize only 2D tensors
        quantize &= (ggml_n_dims(cur) == 2);

        if (quantize) {
            new_type = type;
            if (new_type >= GGML_TYPE_Q2_K && name.find("embd") != std::string::npos) {
                new_type = GGML_TYPE_Q8_0; // ggml_get_rows needs non K type
                // LOG_ERR("%s: quantizing %s to %s\n", __func__, name.c_str(), ggml_type_name(new_type));
            }
            const size_t n_elms = ggml_nelements(cur);
            float * f32_data;

            switch (cur->type) {
            case GGML_TYPE_F32:
                f32_data = (float *)cur->data;
                break;
            case GGML_TYPE_F16:
                if (conv_buf.size() < n_elms) {
                    conv_buf.resize(n_elms);
                }
                for (size_t j = 0; j < n_elms; ++j) {
                    conv_buf[j] = ggml_fp16_to_fp32(((ggml_fp16_t *)cur->data)[j]);
                }
                f32_data = (float *)conv_buf.data();
                break;
            default:
                LOG_ERR("Please use an input file in f32 or f16\n");
                clip_free(ctx_clip);
                gguf_free(ctx_out);
                return false;
            }

            if (work.size() < n_elms * 4) {
                work.resize(n_elms * 4);
            }
            new_data = work.data();

            new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, n_elms / cur->ne[0], cur->ne[0], nullptr);
        } else {
            new_type = cur->type;
            new_data = cur->data;
            new_size = ggml_nbytes(cur);
        }

        const size_t orig_size = ggml_nbytes(cur);
        total_size_org += orig_size;
        total_size_new += new_size;

        gguf_set_tensor_type(ctx_out, name.c_str(), new_type);
        gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size);
        fout.write((const char *)new_data, new_size);

        size_t pad = GGML_PAD(new_size, gguf_get_alignment(ctx_out)) - new_size;
        for (size_t j = 0; j < pad; ++j) {
            fout.put(0);
        }

        LOG_INF("%s: n_dims = %d | quantize = %d | size = %f MB -> %f MB\n", name.c_str(), ggml_n_dims(cur), quantize,
                orig_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
    }

    // go back to the beginning of the file and write the updated metadata
    fout.seekp(0, std::ios::beg);
    std::vector<uint8_t> meta(meta_size);
    gguf_get_meta_data(ctx_out, meta.data());
    fout.write((const char *)meta.data(), meta_size);

    fout.close();

    clip_free(ctx_clip);
    gguf_free(ctx_out);

    {
        LOG_INF("%s: original  size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0);
        LOG_INF("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0);
    }

    return true;
}
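
// number of embedding dimensions produced by the multimodal projector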
int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
    if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
        return ctx->vision_model.mm_model_block_1_block_2_1_b->ne[0];
    }
    if (ctx->proj_type == PROJECTOR_TYPE_LDPV2) {
        return ctx->vision_model.mm_model_peg_0_b->ne[0];
    }
    if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
        return ctx->vision_model.mm_2_b->ne[0];
    }
    if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
        return ctx->vision_model.mm_3_b->ne[0];
    }
    if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) {
        if (ctx->minicpmv_version == 2) {
            return 4096;
        }
        else if (ctx->minicpmv_version == 3) {
            return 3584;
        }
    }

    std::string proj_type = PROJECTOR_TYPE_NAMES[ctx->proj_type];
    throw std::runtime_error(format("%s: projector type %s is not supported yet\n", __func__, proj_type.c_str()));
}
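
// returns the minicpmv version if the model uses a minicpmv projector, 0 otherwise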
int clip_is_minicpmv(const struct clip_ctx * ctx) {
    if (ctx->has_minicpmv_projector) {
        return ctx->minicpmv_version;
    }
    return 0;
}