Josh Yan 8 months ago
Parent
Commit 80eef7c7b1
3 changed files with 22 additions and 15 deletions
  1. llm/ext_server/server.cpp (+1 -1)
  2. llm/patches/06-embeddings.diff (+2 -2)
  3. llm/patches/12-paligemma.diff (+19 -12)

+ 1 - 1
llm/ext_server/server.cpp

@@ -1321,7 +1321,7 @@ struct llama_server_context
             llama_batch_add(batch, append_tokens[i], system_tokens.size() + slot.n_past, {slot.id}, true);
             slot.n_past += 1;
         }
-        // llama_set_causal_attn(ctx, false);
+        llama_set_causal_attn(ctx, false);
         return true;
     }
 

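For context, llama_set_causal_attn is upstream llama.cpp's runtime toggle for the attention mask; un-commenting the call switches the context to non-causal after the image-embedding placeholder tokens are appended, giving the PaliGemma prefix-LM behavior (image + prompt attend bidirectionally). A minimal sketch of the resulting pattern, assuming ctx is a live llama_context and batch already holds the prefix tokens (this is not the server's exact control flow):

    // Sketch only: PaliGemma-style prefix attention.
    llama_set_causal_attn(ctx, false);     // prefix (image + prompt) is bidirectional
    if (llama_decode(ctx, batch) != 0) {
        // handle decode failure
    }
    llama_set_causal_attn(ctx, true);      // generated suffix stays causal
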
+ 2 - 2
llm/patches/06-embeddings.diff

@@ -7,7 +7,7 @@ index 1fe2b9f7..a43312a7 100644
  
      // TODO: use a per-batch flag for logits presence instead
 -    const bool has_logits = !cparams.embeddings;
-+    const bool has_logits =  cparams.causal_attn;
++    const bool has_logits =  cparams.causal_attn || lctx.image_embeds;
      const bool has_embd   =  lctx.is_encoding || (cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE));
  
      const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0;
@@ -36,7 +36,7 @@ index 1fe2b9f7..a43312a7 100644
              GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor");
          }
 +
-+        if (!cparams.causal_attn) {
++        if (!cparams.causal_attn && !has_image_embeds) {
 +            res = nullptr; // do not extract logits when not needed
 +        }
 +

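In plain terms: has_logits gates allocation (and extraction) of the logits buffer in llama.cpp's output-reserve path, so with this change logits stay available whenever image embeddings are pending, even though causal attention is disabled for the prefix. A simplified sketch of the patched decision, with field names taken from the diff and n_vocab / n_embd / n_outputs_max assumed in scope:

    // Simplified sketch of the patched buffer-sizing logic.
    const bool has_logits = cparams.causal_attn || lctx.image_embeds;
    const bool has_embd   = lctx.is_encoding ||
                            (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_NONE);

    const size_t logits_size = has_logits ? n_vocab * n_outputs_max : 0; // in floats
    const size_t embd_size   = has_embd   ? n_embd  * n_outputs_max : 0; // in floats
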
+ 19 - 12
llm/patches/12-paligemma.diff

@@ -1,8 +1,8 @@
 diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
-index 7cda5f10..671806fd 100644
+index 54aa822c..67a02c4c 100644
 --- a/examples/llava/clip.cpp
 +++ b/examples/llava/clip.cpp
-@@ -708,11 +708,12 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
+@@ -764,11 +764,12 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
          if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
              embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
              embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
@@ -20,7 +20,7 @@ index 7cda5f10..671806fd 100644
          } else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
              embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
              embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
-@@ -2076,6 +2077,10 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
+@@ -2542,6 +2543,10 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
          return ctx->vision_model.mm_model_peg_0_b->ne[0];
      }
      if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
@@ -32,7 +32,7 @@ index 7cda5f10..671806fd 100644
      }
      if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
 diff --git a/include/llama.h b/include/llama.h
-index f23355a6..e48da401 100644
+index ce07f4fa..07b09c9a 100644
 --- a/include/llama.h
 +++ b/include/llama.h
 @@ -444,6 +444,12 @@ extern "C" {
@@ -42,17 +42,17 @@ index f23355a6..e48da401 100644
 +    // Sets image embeddings
 +    LLAMA_API void set_image_embeds(struct llama_context *ctx, float *data);
 +
-+    // Gets architecture
++    // Get architecture
 +    LLAMA_API int llama_get_architecture(struct llama_model *model);
 +
      LLAMA_API int64_t llama_time_us(void);
  
      LLAMA_API size_t llama_max_devices(void);
 diff --git a/src/llama.cpp b/src/llama.cpp
-index a7b1c9eb..ee067919 100644
+index 7f2f0003..754d3d5f 100644
 --- a/src/llama.cpp
 +++ b/src/llama.cpp
-@@ -2710,6 +2710,8 @@ struct llama_context {
+@@ -2719,6 +2719,8 @@ struct llama_context {
  
      bool logits_all = false;
  
@@ -61,7 +61,7 @@ index a7b1c9eb..ee067919 100644
      // embeddings output (2-dimensional array: [n_outputs][n_embd])
      // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
      size_t  embd_size = 0; // capacity (of floats) for embeddings
-@@ -11599,6 +11601,15 @@ struct llm_build_context {
+@@ -11660,6 +11662,15 @@ struct llm_build_context {
  
          inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
  
@@ -77,17 +77,24 @@ index a7b1c9eb..ee067919 100644
          inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
          cb(inpL, "inp_scaled", -1);
  
-@@ -14589,7 +14600,8 @@ static int llama_decode_internal(
+@@ -14565,6 +14576,7 @@ static int llama_decode_internal(
+ 
+     const int64_t n_embd  = hparams.n_embd;
+     const int64_t n_vocab = hparams.n_vocab;
++    const bool has_image_embeds = lctx.image_embeds;
+ 
+     uint32_t n_outputs = 0;
+     uint32_t n_outputs_prev = 0;
+@@ -14678,7 +14690,7 @@ static int llama_decode_internal(
          }
  
          // non-causal masks do not use the KV cache
 -        if (hparams.causal_attn) {
-+        if (hparams.causal_attn || lctx.image_embeds)
-+        {
++        if (hparams.causal_attn || lctx.image_embeds) {
              llama_kv_cache_update(&lctx);
  
              // if we have enough unused cells before the current head ->
-@@ -16448,6 +16460,16 @@ void llama_free_model(struct llama_model * model) {
+@@ -16589,6 +16601,16 @@ void llama_free_model(struct llama_model * model) {
      delete model;
  }
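
Taken together, the new entry points would be driven from a multimodal runner roughly as follows. This is a hedged sketch: image_embd is assumed to be the float buffer produced by the clip.cpp projector, and batch is assumed to already contain the placeholder and prompt tokens.

    // Sketch: wiring CLIP image embeddings into the patched decoder.
    set_image_embeds(ctx, image_embd);   // stored in lctx.image_embeds

    // The next decode consumes the stash: the Gemma build graph injects the
    // image embeddings before scaling inpL, and the patched causal_attn /
    // has_logits checks keep the KV-cache path and logits available.
    if (llama_decode(ctx, batch) != 0) {
        // handle decode failure
    }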