llama-context.h

/**
 * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#include "llama.h"
#include "llama-batch.h"
#include "llama-cparams.h"
#include "llama-model.h"
#include "llama-kv-cache.h"
#include "llama-adapter.h"

#include "ggml-cpp.h"

#include <map>
#include <unordered_map>
#include <vector>
#include <set>

struct llama_context {
    llama_context(const llama_model & model)
        : model(model)
        , t_start_us(model.t_start_us)
        , t_load_us(model.t_load_us) {}

    const struct llama_model & model;

    struct llama_cparams        cparams;
    struct llama_sbatch         sbatch;  // TODO: revisit if needed
    struct llama_kv_cache       kv_self;
    struct llama_control_vector cvec;

    std::unordered_map<struct llama_lora_adapter *, float> lora_adapters;

    std::vector<ggml_backend_ptr> backends;
    std::vector<std::pair<ggml_backend_t, ggml_backend_set_n_threads_t>> set_n_threads_fns;

    ggml_backend_t backend_cpu = nullptr;

    ggml_threadpool_t threadpool       = nullptr;
    ggml_threadpool_t threadpool_batch = nullptr;

    bool has_evaluated_once = false;

    mutable int64_t t_start_us;
    mutable int64_t t_load_us;
    mutable int64_t t_p_eval_us = 0;
    mutable int64_t t_eval_us   = 0;

    mutable int64_t t_compute_start_us = 0;
    mutable int64_t n_queued_tokens    = 0;

    mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
    mutable int32_t n_eval   = 0; // number of eval calls

    // host buffer for the model output (logits and embeddings)
    ggml_backend_buffer_ptr buf_output;

    // decode output (2-dimensional array: [n_outputs][n_vocab])
    size_t  logits_size = 0; // capacity (of floats) for logits
    float * logits      = nullptr;

    std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers
    size_t  output_size = 0; // capacity (in token positions) for the output buffers
    int32_t n_outputs   = 0; // number of actually-used outputs in the current ubatch or last logical batch

    bool logits_all = false;
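
    // Illustrative sketch (not part of the upstream header): reading the logits row for
    // a given position in the user-provided batch. `lctx`, `i_batch` and `n_vocab` are
    // assumed to come from the caller / model hyperparameters:
    //
    //   const int32_t row        = lctx.output_ids[i_batch];            // a negative value would mean no output was requested here
    //   const float * row_logits = lctx.logits + (size_t) row * n_vocab;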

    // embeddings output (2-dimensional array: [n_outputs][n_embd])
    // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
    size_t  embd_size = 0; // capacity (of floats) for embeddings
    float * embd      = nullptr;

    // sequence embeddings output (map of [n_embd] vectors)
    // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
    std::map<llama_seq_id, std::vector<float>> embd_seq;
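
    // Illustrative sketch (not part of the upstream header): with a pooling type other
    // than LLAMA_POOLING_TYPE_NONE, the pooled embedding of a sequence is looked up by
    // its id. `lctx` and `seq_id` are assumed variables:
    //
    //   const std::vector<float> & seq_embd = lctx.embd_seq.at(seq_id); // n_embd floats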

    // whether we are computing encoder output or decoder output
    bool is_encoding = false;

    // TODO: find a better way to accommodate multi-dimensional position encoding methods
    // number of position ids each token gets: 1 per token in most cases;
    // when using m-rope, each token gets 3 position ids representing a 3-dimensional coordinate
    int n_pos_per_token = 1;
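
    // Illustrative note (not part of the upstream header): the position input is
    // flattened, so with m-rope a ubatch of e.g. 512 tokens would carry
    // 512 * 3 = 1536 position ids instead of 512.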

    // output of the encoder part of the encoder-decoder models
    std::vector<float> embd_enc;
    std::vector<std::set<llama_seq_id>> seq_ids_enc;

    // memory buffers used to evaluate the model
    std::vector<uint8_t> buf_compute_meta;
    ggml_backend_sched_ptr sched;

    ggml_abort_callback abort_callback      = nullptr;
    void *              abort_callback_data = nullptr;

    // input tensors
    struct ggml_tensor * inp_tokens;           // I32 [n_batch]
    struct ggml_tensor * inp_embd;             // F32 [n_embd, n_batch]
    struct ggml_tensor * inp_pos;              // I32 [n_batch]
    struct ggml_tensor * inp_out_ids;          // I32 [n_outputs]
    struct ggml_tensor * inp_KQ_mask;          // F32 [kv_size, n_batch]
    struct ggml_tensor * inp_KQ_mask_swa;      // F32 [kv_size, n_batch]
    struct ggml_tensor * inp_K_shift;          // I32 [kv_size]
    struct ggml_tensor * inp_mean;             // F32 [n_batch, n_batch]
    struct ggml_tensor * inp_cls;              // I32 [n_batch]
    struct ggml_tensor * inp_s_copy;           // I32 [kv_size]
    struct ggml_tensor * inp_s_mask;           // F32 [1, n_kv]
    struct ggml_tensor * inp_s_seq;            // I32 [n_kv, n_batch]
    struct ggml_tensor * inp_pos_bucket;       // I32 [n_batch|n_kv, n_batch]
    struct ggml_tensor * inp_embd_enc;         // F32 [n_embd, n_outputs_enc]
    struct ggml_tensor * inp_KQ_mask_cross;    // F32 [n_outputs_enc, n_batch]
    struct ggml_tensor * inp_cross_attn_state; // F32 [4, n_embd, 1061]
};

// TODO: make these methods of llama_context

void llama_set_k_shift(struct llama_context & lctx);

void llama_set_s_copy(struct llama_context & lctx);

void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch);

// Make sure enough space is available for outputs.
// Returns max number of outputs for which space was reserved.
size_t llama_output_reserve(struct llama_context & lctx, size_t n_outputs);

// make the outputs have the same order they had in the user-provided batch
void llama_output_reorder(struct llama_context & ctx);

// For internal test use
// TODO: remove
const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(struct llama_context * ctx);
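
// Illustrative usage sketch (not part of the upstream header): a rough outline of how
// these helpers could fit into a decode step; the loop structure, the graph build and
// compute steps, and the variable names are assumptions for illustration only:
//
//   llama_output_reserve(lctx, n_outputs_all);   // make room for every requested output
//   for (/* each ubatch split from the input batch */) {
//       // ... build and allocate the compute graph for the ubatch ...
//       llama_set_inputs(lctx, ubatch);          // copy ubatch data into the inp_* tensors
//       // ... run the backend scheduler to compute the graph ...
//   }
//   llama_output_reorder(lctx);                  // restore the user-provided batch order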