@@ -1,8 +1,8 @@
 diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
-index 7cda5f10..671806fd 100644
+index 54aa822c..67a02c4c 100644
 --- a/examples/llava/clip.cpp
 +++ b/examples/llava/clip.cpp
-@@ -708,11 +708,12 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
+@@ -764,11 +764,12 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
          if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
              embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
              embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
@@ -20,7 +20,7 @@ index 7cda5f10..671806fd 100644
          } else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
              embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
              embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
-@@ -2076,6 +2077,10 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
+@@ -2542,6 +2543,10 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
          return ctx->vision_model.mm_model_peg_0_b->ne[0];
      }
      if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
@@ -32,7 +32,7 @@ index 7cda5f10..671806fd 100644
      }
      if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
 diff --git a/include/llama.h b/include/llama.h
-index f23355a6..e48da401 100644
+index ce07f4fa..07b09c9a 100644
 --- a/include/llama.h
 +++ b/include/llama.h
 @@ -444,6 +444,12 @@ extern "C" {
@@ -42,17 +42,17 @@ index f23355a6..e48da401 100644
 +    // Sets image embeddings
 +    LLAMA_API void set_image_embeds(struct llama_context *ctx, float *data);
 +
-+    // Gets architecture
++    // Get architecture
 +    LLAMA_API int llama_get_architecture(struct llama_model *model);
 +
     LLAMA_API int64_t llama_time_us(void);
 
     LLAMA_API size_t llama_max_devices(void);
 diff --git a/src/llama.cpp b/src/llama.cpp
-index a7b1c9eb..ee067919 100644
+index 7f2f0003..754d3d5f 100644
 --- a/src/llama.cpp
 +++ b/src/llama.cpp
-@@ -2710,6 +2710,8 @@ struct llama_context {
+@@ -2719,6 +2719,8 @@ struct llama_context {
 
      bool logits_all = false;
 
@@ -61,7 +61,7 @@ index a7b1c9eb..ee067919 100644
      // embeddings output (2-dimensional array: [n_outputs][n_embd])
      // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
      size_t embd_size = 0; // capacity (of floats) for embeddings
-@@ -11599,6 +11601,15 @@ struct llm_build_context {
+@@ -11660,6 +11662,15 @@ struct llm_build_context {
 
          inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
 
@@ -77,17 +77,24 @@ index a7b1c9eb..ee067919 100644
          inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
          cb(inpL, "inp_scaled", -1);
 
-@@ -14589,7 +14600,8 @@ static int llama_decode_internal(
+@@ -14565,6 +14576,7 @@ static int llama_decode_internal(
+
+     const int64_t n_embd  = hparams.n_embd;
+     const int64_t n_vocab = hparams.n_vocab;
++    const bool has_image_embeds = lctx.image_embeds;
+
+     uint32_t n_outputs = 0;
+     uint32_t n_outputs_prev = 0;
+@@ -14678,7 +14690,7 @@ static int llama_decode_internal(
      }
 
      // non-causal masks do not use the KV cache
 -    if (hparams.causal_attn) {
-+    if (hparams.causal_attn || lctx.image_embeds)
-+    {
++    if (hparams.causal_attn || lctx.image_embeds) {
          llama_kv_cache_update(&lctx);
 
          // if we have enough unused cells before the current head ->
-@@ -16448,6 +16460,16 @@ void llama_free_model(struct llama_model * model) {
+@@ -16589,6 +16601,16 @@ void llama_free_model(struct llama_model * model) {
      delete model;
  }
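
The patch wires the image path through three layers (clip.cpp computes the projected embeddings, llama.h exposes them to the caller, llama.cpp consumes them during decode) but never shows the calling side. Below is a hypothetical caller-side sketch, not part of the patch: it assumes the clip.h/llava.h helpers from the llava example, made-up file names, and that prompt tokenization and the llama_decode() loop happen afterwards as usual.

// Hypothetical usage sketch (not part of the patch). File names are made up;
// prompt tokenization and the llama_decode() loop are elided.
#include "llama.h"
#include "clip.h"
#include "llava.h"

int main(void) {
    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);

    // New in this patch: query the architecture id before deciding how to
    // handle images (the meaning of the returned int is model-specific).
    const int arch = llama_get_architecture(model);
    (void) arch;

    llama_context_params cparams = llama_context_default_params();
    llama_context * lctx = llama_new_context_with_model(model, cparams);

    // Encode and project the image with the CLIP tower from the llava example.
    clip_ctx * ctx_clip = clip_model_load("mmproj.gguf", /*verbosity=*/1);
    llava_image_embed * img =
        llava_image_embed_make_with_filename(ctx_clip, /*n_threads=*/4, "input.png");

    // New in this patch: hand the projected embeddings to the context; the
    // patched llama_decode_internal() then sees lctx.image_embeds as non-null.
    set_image_embeds(lctx, img->embed);

    // ... tokenize the prompt and run llama_decode() as usual ...

    llava_image_embed_free(img);
    clip_free(ctx_clip);
    llama_free(lctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}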