working causal attention

Josh Yan, 8 months ago
Commit a6d30ecefe
1 changed file with 17 additions and 17 deletions
llm/patches/12-paligemma.diff (+17 −17)

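The patch being rewritten here touches three places in llama.cpp: clip_image_build_graph in examples/llava/clip.cpp only runs the second projector matmul when model.mm_2_w exists, include/llama.h gains a small API addition, and llama_decode_internal in src/llama.cpp takes the KV-cache branch whenever the context carries image embeddings (hparams.causal_attn || lctx.image_embeds). The outer diff below mostly rebases the patch onto newer file offsets and adjusts brace/whitespace style. As a minimal sketch of that decode-time condition only (the struct names are simplified stand-ins, not the real llama.cpp types):

// Sketch: the decode-time decision the patch makes in llama_decode_internal --
// take the KV-cache/causal path not only when hparams.causal_attn is set, but
// also whenever image embeddings have been attached to the context.
#include <cstdio>

struct hparams_t {                       // stand-in for llama_hparams
    bool causal_attn = false;
};

struct context_t {                       // stand-in for llama_context
    hparams_t hparams;
    float *   image_embeds = nullptr;    // member the patch adds to llama_context
};

// true when decode should go through the KV-cache branch
static bool use_kv_cache_path(const context_t & lctx) {
    // mirrors the patched condition: hparams.causal_attn || lctx.image_embeds
    return lctx.hparams.causal_attn || lctx.image_embeds != nullptr;
}

int main() {
    context_t lctx;                      // non-causal model, no image attached yet
    float dummy_embeds[4] = {0};

    printf("no image embeds  -> kv cache: %d\n", use_kv_cache_path(lctx)); // 0
    lctx.image_embeds = dummy_embeds;
    printf("image embeds set -> kv cache: %d\n", use_kv_cache_path(lctx)); // 1
    return 0;
}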
@@ -1,26 +1,25 @@
 diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
-index 54aa822c..67a02c4c 100644
+index 9c0d351e..019a147c 100644
 --- a/examples/llava/clip.cpp
 +++ b/examples/llava/clip.cpp
-@@ -764,11 +764,12 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
-         if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
+@@ -718,10 +718,12 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
              embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
              embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
--
+ 
 -            embeddings = ggml_gelu(ctx0, embeddings);
 -            embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
 -            embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
 -
 +            if (model.mm_2_w)
 +            {
-+                embeddings = ggml_gelu(ctx0, embeddings);
-+                embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
-+                embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
-+            }
++                 embeddings = ggml_gelu(ctx0, embeddings);
++                 embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
++                 embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
++             }
          } else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
              embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
              embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
-@@ -2542,6 +2543,10 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
+@@ -2102,6 +2104,10 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
          return ctx->vision_model.mm_model_peg_0_b->ne[0];
      }
      if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
@@ -32,7 +31,7 @@ index 54aa822c..67a02c4c 100644
      }
      if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
 diff --git a/include/llama.h b/include/llama.h
-index ce07f4fa..07b09c9a 100644
+index 6072e76e..4c572a74 100644
 --- a/include/llama.h
 +++ b/include/llama.h
 @@ -444,6 +444,12 @@ extern "C" {
@@ -49,10 +48,10 @@ index ce07f4fa..07b09c9a 100644
  
      LLAMA_API size_t llama_max_devices(void);
 diff --git a/src/llama.cpp b/src/llama.cpp
-index 7f2f0003..754d3d5f 100644
+index d883ed19..322b4b59 100644
 --- a/src/llama.cpp
 +++ b/src/llama.cpp
-@@ -2719,6 +2719,8 @@ struct llama_context {
+@@ -2710,6 +2710,8 @@ struct llama_context {
  
      bool logits_all = false;
  
@@ -61,7 +60,7 @@ index 7f2f0003..754d3d5f 100644
      // embeddings output (2-dimensional array: [n_outputs][n_embd])
      // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
      size_t  embd_size = 0; // capacity (of floats) for embeddings
-@@ -11660,6 +11662,15 @@ struct llm_build_context {
+@@ -11591,6 +11593,15 @@ struct llm_build_context {
  
          inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  
@@ -77,7 +76,7 @@ index 7f2f0003..754d3d5f 100644
          inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
          cb(inpL, "inp_scaled", -1);
  
-@@ -14565,6 +14576,7 @@ static int llama_decode_internal(
+@@ -14468,6 +14479,7 @@ static int llama_decode_internal(
  
      const int64_t n_embd  = hparams.n_embd;
      const int64_t n_vocab = hparams.n_vocab;
@@ -85,16 +84,17 @@ index 7f2f0003..754d3d5f 100644
  
      uint32_t n_outputs = 0;
      uint32_t n_outputs_prev = 0;
-@@ -14678,7 +14690,7 @@ static int llama_decode_internal(
+@@ -14581,7 +14593,8 @@ static int llama_decode_internal(
          }
  
          // non-causal masks do not use the KV cache
 -        if (hparams.causal_attn) {
-+        if (hparams.causal_attn || lctx.image_embeds) {
++        if (hparams.causal_attn || lctx.image_embeds)
++        {
              llama_kv_cache_update(&lctx);
  
              // if we have enough unused cells before the current head ->
-@@ -16589,6 +16601,16 @@ void llama_free_model(struct llama_model * model) {
+@@ -16455,6 +16468,16 @@ void llama_free_model(struct llama_model * model) {
      delete model;
  }