This patch threads externally computed image embeddings into llama.cpp's decoder. It starts in `examples/llava/clip.cpp`, where the second layer of the LLaVA-style MLP projector becomes optional:

```diff
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index 9c0d351e..019a147c 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -718,10 +718,12 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
             embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
             embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
 
-            embeddings = ggml_gelu(ctx0, embeddings);
-            embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
-            embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
-
+            if (model.mm_2_w)
+            {
+                embeddings = ggml_gelu(ctx0, embeddings);
+                embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
+                embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
+            }
         } else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
             embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
             embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
@@ -2102,6 +2104,10 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
         return ctx->vision_model.mm_model_peg_0_b->ne[0];
     }
     if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
+        if (ctx->vision_model.mm_2_b == nullptr)
+        {
+            return ctx->vision_model.mm_0_b->ne[0];
+        }
         return ctx->vision_model.mm_2_b->ne[0];
     }
     if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
```
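The projector change makes the second MLP layer optional: when `mm_2_w` was never loaded, the GELU and second matmul are skipped, and `clip_n_mmproj_embd` reports the output width from the first layer's bias instead of dereferencing the missing `mm_2_b`. A single-linear-layer projector (as in PaliGemma-class models) can then reuse the existing `PROJECTOR_TYPE_MLP` path. Callers that size their buffers through this accessor stay consistent automatically; a minimal caller-side sketch against the public `clip.h` API (the file name and thread count are placeholders):

```cpp
#include <vector>
#include "clip.h"

// Encode one preprocessed image; the buffer is sized from
// clip_n_patches() x clip_n_mmproj_embd(), which now also reflects a
// one-layer projector. Returns an empty vector on failure.
std::vector<float> encode_image(clip_image_f32 * img) {
    clip_ctx * ctx = clip_model_load("mmproj.gguf", /*verbosity*/ 1);
    const int n_embd    = clip_n_mmproj_embd(ctx); // mm_0_b->ne[0] if mm_2 is absent
    const int n_patches = clip_n_patches(ctx);
    std::vector<float> embd((size_t) n_patches * n_embd);
    if (!clip_image_encode(ctx, /*n_threads*/ 4, img, embd.data())) {
        embd.clear();
    }
    clip_free(ctx);
    return embd;
}
```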
`include/llama.h` then declares two new entry points:

```diff
diff --git a/include/llama.h b/include/llama.h
index 6072e76e..4c572a74 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -444,6 +444,12 @@ extern "C" {
     // Frees all allocated memory
     LLAMA_API void llama_free(struct llama_context * ctx);
 
+    // Sets image embeddings
+    LLAMA_API void set_image_embeds(struct llama_context *ctx, float *data);
+
+    // Get architecture
+    LLAMA_API int llama_get_architecture(struct llama_model *model);
+
     LLAMA_API int64_t llama_time_us(void);
 
     LLAMA_API size_t llama_max_devices(void);
```
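The naming is worth flagging: every other symbol exported from this `extern "C"` block carries the `llama_` prefix, which `set_image_embeds` drops, and `llama_get_architecture` returns the internal `llm_arch` enumerator as a bare `int` that callers cannot interpret without mirroring values out of `src/llama.cpp`. Upstream-style declarations would presumably look more like the following (a suggestion, not what the patch exports):

```cpp
// Suggested shape only; the patch itself exports the declarations above.
LLAMA_API void llama_set_image_embeds(struct llama_context * ctx, float * data);
LLAMA_API int  llama_get_architecture(const struct llama_model * model);
```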
On the `src/llama.cpp` side, the context first gains somewhere to stash the pending embeddings:

```diff
diff --git a/src/llama.cpp b/src/llama.cpp
index d883ed19..322b4b59 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -2710,6 +2710,8 @@ struct llama_context {
 
     bool logits_all = false;
 
+    float *image_embeds = nullptr;
+
     // embeddings output (2-dimensional array: [n_outputs][n_embd])
     // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
     size_t embd_size = 0; // capacity (of floats) for embeddings
```
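The new member is a raw, non-owning pointer: the caller keeps ownership of the buffer, and the graph-builder hunk below consumes and clears it on the next decode. A sketch of the implied lifetime contract (the helper name is hypothetical):

```cpp
#include <vector>
#include "llama.h"

// Implied lifetime (an assumption drawn from this patch): the context only
// borrows the buffer; it is read once, by the next decode, and never freed.
void decode_with_image(llama_context * ctx, llama_batch batch,
                       std::vector<float> & image_embd) {
    set_image_embeds(ctx, image_embd.data()); // borrow, not ownership transfer
    llama_decode(ctx, batch);                 // graph build consumes and clears it
    // image_embd may be freed or reused only after llama_decode() returns
}
```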
```diff
@@ -11591,6 +11593,15 @@ struct llm_build_context {
 
         inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
 
+        if (lctx.image_embeds)
+        {
+            struct ggml_tensor *image_embeds = ggml_dup_tensor(ctx0, inpL);
+            image_embeds->data = lctx.image_embeds;
+            image_embeds->ne[1] = 256;
+            inpL = ggml_set_2d_inplace(ctx0, inpL, image_embeds, inpL->nb[1], 0);
+            lctx.image_embeds = NULL;
+        }
+
         inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
         cb(inpL, "inp_scaled", -1);
 
```
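The injection rides the Gemma graph builder (the `ggml_scale` by `sqrtf(n_embd)` just below it is Gemma's embedding scaling). `ggml_dup_tensor` yields a tensor with `inpL`'s type and shape but no data of its own; pointing its `data` at the user buffer and shrinking `ne[1]` to 256 turns it into a 256-row view, and `ggml_set_2d_inplace` overwrites the first 256 token embeddings of `inpL` with it, using `inpL->nb[1]` as the row stride and offset 0. The hard-coded 256 presumably matches a fixed image-token count (a 224 px, patch-14 vision tower gives 16×16 = 256 tokens, as in PaliGemma), and clearing `lctx.image_embeds` makes the splice fire only on the first decode after `set_image_embeds`. The ggml mechanics in isolation (a standalone sketch, not patch code):

```cpp
#include "ggml.h"

// Rows of a borrowed buffer replace the first n_img_tokens rows of a,
// in place, via a zero-copy view tensor.
struct ggml_tensor * overwrite_prefix_rows(struct ggml_context * ctx0,
                                           struct ggml_tensor * a, // [n_embd, n_tokens]
                                           float * img_data,
                                           int64_t n_img_tokens) {
    struct ggml_tensor * b = ggml_dup_tensor(ctx0, a); // same type/shape, fresh metadata
    b->data  = img_data;      // view onto the caller's buffer, no copy
    b->ne[1] = n_img_tokens;  // only the first n_img_tokens rows are real
    // write b over rows [0, n_img_tokens) of a, using a's row stride
    return ggml_set_2d_inplace(ctx0, a, b, a->nb[1], 0);
}
```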
Two hunks in `llama_decode_internal` follow:

```diff
@@ -14468,6 +14479,7 @@ static int llama_decode_internal(
 
     const int64_t n_embd  = hparams.n_embd;
     const int64_t n_vocab = hparams.n_vocab;
+    const bool has_image_embeds = lctx.image_embeds;
 
     uint32_t n_outputs = 0;
     uint32_t n_outputs_prev = 0;
@@ -14581,7 +14593,8 @@
         }
 
         // non-causal masks do not use the KV cache
-        if (hparams.causal_attn) {
+        if (hparams.causal_attn || lctx.image_embeds)
+        {
             llama_kv_cache_update(&lctx);
 
             // if we have enough unused cells before the current head ->
```
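Two things to note here: `has_image_embeds` snapshots the pending pointer before the graph build nulls it (its use lies outside this hunk's context), and the branch that the comment reserves for causal attention is now also taken while image embeddings are pending, so a model flagged non-causal still runs the KV-cache bookkeeping for its image-prefix decode. Both spots rely on implicit pointer-to-bool conversion; spelling the test out would be slightly clearer (a style suggestion only):

```cpp
// Equivalent, but makes the pointer test explicit:
const bool has_image_embeds = lctx.image_embeds != nullptr;
```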
Finally, the definitions of the two new API functions:

```diff
@@ -16455,6 +16468,16 @@ void llama_free_model(struct llama_model * model) {
     delete model;
 }
 
+void set_image_embeds(llama_context *ctx, float *data)
+{
+    ctx->image_embeds = data;
+}
+
+int llama_get_architecture(llama_model *model)
+{
+    return model->arch;
+}
+
 struct llama_context * llama_new_context_with_model(
              struct llama_model * model,
              struct llama_context_params params) {
```
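Taken together, the intended call sequence appears to be: encode the image with the CLIP/mmproj model, hand the projected embeddings to the context, then decode a batch whose first 256 positions stand in for the image. A hedged end-to-end sketch using the existing `examples/llava` helpers (file names, thread counts, and the placeholder-token scheme are assumptions this patch does not pin down):

```cpp
#include <vector>
#include "llama.h"
#include "clip.h"   // helpers from examples/llava
#include "llava.h"

int main() {
    llama_backend_init();

    llama_model * model = llama_load_model_from_file("paligemma.gguf",
                                                     llama_model_default_params());
    llama_context * ctx = llama_new_context_with_model(model,
                                                       llama_context_default_params());

    // encode the image through the vision tower + projector
    clip_ctx * ctx_clip = clip_model_load("mmproj.gguf", /*verbosity*/ 1);
    llava_image_embed * img =
        llava_image_embed_make_with_filename(ctx_clip, /*n_threads*/ 4, "image.jpg");

    // queue the embeddings; the next decode overwrites its first 256 rows
    set_image_embeds(ctx, img->embed);

    // reserve one slot per image embedding ahead of the text (placeholder
    // id 0 is an assumption; a real prompt would use the model's image
    // token followed by the tokenized text)
    std::vector<llama_token> tokens(256, 0);
    tokens.push_back(llama_token_bos(model));
    llama_decode(ctx, llama_batch_get_one(tokens.data(), (int32_t) tokens.size(), 0, 0));

    llava_image_embed_free(img);
    clip_free(ctx_clip);
    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}
```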