From c9a6ca9fc039233dee746a4da9705762cd9e515d Mon Sep 17 00:00:00 2001
From: Michael Yang <mxyng@pm.me>
Date: Mon, 16 Sep 2024 15:53:14 -0700
Subject: [PATCH] 06-embeddings.diff

---
 src/llama.cpp | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index d1e959fc..f79bd782 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -15898,7 +15898,7 @@ static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) {
     const auto n_embd  = hparams.n_embd;
 
     // TODO: use a per-batch flag for logits presence instead
-    const bool has_logits = !cparams.embeddings;
+    const bool has_logits =  cparams.causal_attn;
     const bool has_embd   =  cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE);
 
     const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0;
@@ -16167,20 +16167,23 @@ static int llama_decode_internal(
             // no output
             res  = nullptr;
             embd = nullptr;
-        } else if (cparams.embeddings) {
-            res  = nullptr; // do not extract logits for embedding case
-            embd = nullptr;
+        }
+
+        if (cparams.embeddings) {
             for (int i = gf->n_nodes - 1; i >= 0; --i) {
-                if (strcmp(gf->nodes[i]->name, "result_embd_pooled") == 0) {
-                    embd = gf->nodes[i];
+                embd = gf->nodes[i];
+                if (strcmp(embd->name, "result_embd_pooled") == 0) {
                     break;
                 }
             }
-            GGML_ASSERT(embd != nullptr && "missing embeddings tensor");
         } else {
             embd = nullptr; // do not extract embeddings when not needed
             GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor");
         }
+
+        if (!cparams.causal_attn) {
+            res = nullptr; // do not extract logits when not needed
+        }
         // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
 
         ggml_backend_sched_alloc_graph(lctx.sched, gf);
-- 
2.46.0
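
Reviewer notes (not part of the patch):

The first hunk changes how llama_output_reserve sizes the output buffer.
Upstream ties logits to "not embeddings" (has_logits = !cparams.embeddings),
so turning embeddings on drops logits entirely; the patch ties logits to
causal attention instead, so a causal model can produce logits and pooled
embeddings from the same decode. Below is a sketch of the patched sizing
rule, with free-standing parameters in place of cparams/hparams; the
embd_size line is assumed to mirror the logits_size line shown as context
in the hunk, since it is not itself part of the diff:

    #include <cstddef>

    enum pooling_type { POOLING_NONE, POOLING_MEAN };

    // Sketch of the patched sizing rule: logits follow causal attention;
    // per-token embeddings are buffered only when pooling is disabled.
    static size_t output_floats(bool causal_attn, bool embeddings,
                                pooling_type pooling,
                                size_t n_vocab, size_t n_embd,
                                size_t n_outputs_max) {
        const bool has_logits = causal_attn;
        const bool has_embd   = embeddings && (pooling == POOLING_NONE);

        const size_t logits_size = has_logits ? n_vocab * n_outputs_max : 0;
        const size_t embd_size   = has_embd   ? n_embd  * n_outputs_max : 0;
        return logits_size + embd_size;
    }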
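The second hunk decouples the two extraction paths in llama_decode_internal:
when embeddings are requested, the compute graph is scanned backwards for
the node named "result_embd_pooled" (output tensors sit near the end of the
graph, so the reverse scan terminates quickly), and the logits tensor (res)
is dropped only when attention is non-causal, rather than whenever
embeddings are enabled. A minimal, self-contained sketch of that reverse
scan follows; node_t, graph_t, and find_pooled_embd are illustrative
stand-ins, not real ggml types:

    #include <cstring>

    // Stand-ins for ggml's graph structures (illustrative only).
    struct node_t  { const char * name; };
    struct graph_t { int n_nodes; node_t ** nodes; };

    // Walk the graph from the last node backwards and stop at the first
    // node named "result_embd_pooled". Note the same edge case as the
    // patched loop: when no node matches, this returns the first graph
    // node rather than nullptr, and the patch likewise removes upstream's
    // GGML_ASSERT(embd != nullptr) check for that path.
    static node_t * find_pooled_embd(graph_t * gf) {
        node_t * embd = nullptr;
        for (int i = gf->n_nodes - 1; i >= 0; --i) {
            embd = gf->nodes[i];
            if (strcmp(embd->name, "result_embd_pooled") == 0) {
                break;
            }
        }
        return embd;
    }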