diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index 7cda5f10..50fbcf08 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -709,9 +709,12 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
             embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
             embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);

-            embeddings = ggml_gelu(ctx0, embeddings);
-            embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
-            embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
+            // paligemma missing second linear layer
+            if (model.mm_2_w) {
+                embeddings = ggml_gelu(ctx0, embeddings);
+                embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
+                embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
+            }

         } else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
             embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
@@ -2076,7 +2079,11 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
         return ctx->vision_model.mm_model_peg_0_b->ne[0];
     }
     if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
+        // paligemma missing second linear layer
+        if (ctx->vision_model.mm_2_b == nullptr) {
+            return ctx->vision_model.mm_0_b->ne[0];
+        }
         return ctx->vision_model.mm_2_b->ne[0];
     }
     if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
         return ctx->vision_model.mm_3_b->ne[0];
diff --git a/include/llama.h b/include/llama.h
index f23355a6..7c6301bf 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -444,6 +444,9 @@ extern "C" {
     // Frees all allocated memory
     LLAMA_API void llama_free(struct llama_context * ctx);

+    // save image embeddings
+    LLAMA_API void set_image_embeds(struct llama_context * ctx, float * data);
+
     LLAMA_API int64_t llama_time_us(void);

     LLAMA_API size_t llama_max_devices(void);
diff --git a/src/llama.cpp b/src/llama.cpp
index a7b1c9eb..b0a6bc27 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -2668,6 +2668,7 @@ struct llama_context {

     const struct llama_model & model;

+    float * image_embeds = nullptr;
     struct llama_cparams cparams;
     struct llama_sampling sampling;
     struct llama_kv_cache kv_self;
@@ -2751,6 +2752,10 @@ struct llama_context {
     struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]
 };

+void set_image_embeds(llama_context * ctx, float * data) {
+    ctx->image_embeds = data;
+}
+
 struct llama_lora_weight {
     struct ggml_tensor * a = nullptr;
     struct ggml_tensor * b = nullptr;
@@ -11599,6 +11604,15 @@ struct llm_build_context {

         inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

+        // set the image embeddings in the input tensor
+        if (lctx.image_embeds) {
+            struct ggml_tensor * image_embeds = ggml_dup_tensor(ctx0, inpL);
+            image_embeds->data = lctx.image_embeds;
+            image_embeds->ne[1] = 256;
+            inpL = ggml_set_2d_inplace(ctx0, inpL, image_embeds, inpL->nb[1], 0);
+            lctx.image_embeds = NULL;
+        }
+
         inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
         cb(inpL, "inp_scaled", -1);

@@ -14589,7 +14603,7 @@ static int llama_decode_internal(
         }

         // non-causal masks do not use the KV cache
-        if (hparams.causal_attn) {
+        if (hparams.causal_attn || lctx.image_embeds) {
             llama_kv_cache_update(&lctx);

             // if we have enough unused cells before the current head ->
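
The patch only wires image embeddings into the decode path; it does not show how a caller is expected to drive the new set_image_embeds() entry point. The sketch below is not part of the patch: it assumes the existing llava helpers from examples/llava (llava_image_embed_make_with_filename, llava_image_embed_free), and run_paligemma and its parameters are illustrative names. It also assumes the prompt reserves 256 leading token positions for the image, matching the hard-coded image_embeds->ne[1] = 256 above.

    // illustrative usage sketch, not part of the patch
    #include "llama.h"
    #include "llava.h"

    static void run_paligemma(struct llama_context * ctx, struct clip_ctx * ctx_clip,
                              const char * image_path, int n_threads) {
        // encode the image with the mmproj/CLIP model; for paligemma the projector
        // yields 256 embedding vectors of size n_embd
        struct llava_image_embed * embed =
            llava_image_embed_make_with_filename(ctx_clip, n_threads, image_path);

        // hand the raw float buffer to the context; the next graph build copies it
        // over the first 256 rows of inpL and then clears lctx.image_embeds
        set_image_embeds(ctx, embed->embed);

        // decode the prompt as usual with llama_decode(); the batch needs at least
        // 256 placeholder tokens ahead of the text so the spliced image embeddings
        // occupy real positions
        // ...

        llava_image_embed_free(embed);
    }

Since the splice sits next to the sqrtf(n_embd) input scaling, it appears to live in the Gemma graph builder, so only Gemma-architecture text models (the PaliGemma decoder) pick up the image embeddings; other architectures are unaffected.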