// llama-context.h
#pragma once

#include "llama.h"
#include "llama-batch.h"
#include "llama-cparams.h"
#include "llama-model.h"
#include "llama-kv-cache.h"
#include "llama-adapter.h"

#include "ggml-cpp.h"

#include <cstddef>
#include <cstdint>
#include <map>
#include <set>
#include <unordered_map>
#include <vector>
  13. struct llama_context {
  14. llama_context(const llama_model & model)
  15. : model(model)
  16. , t_start_us(model.t_start_us)
  17. , t_load_us(model.t_load_us) {}
  18. const struct llama_model & model;
  19. struct llama_cparams cparams;
  20. struct llama_sbatch sbatch; // TODO: revisit if needed
  21. struct llama_kv_cache kv_self;
  22. struct llama_control_vector cvec;
  23. std::unordered_map<struct llama_lora_adapter *, float> lora_adapters;
  24. std::vector<ggml_backend_ptr> backends;
  25. std::vector<std::pair<ggml_backend_t, ggml_backend_set_n_threads_t>> set_n_threads_fns;
  26. ggml_backend_t backend_cpu = nullptr;
  27. ggml_threadpool_t threadpool = nullptr;
  28. ggml_threadpool_t threadpool_batch = nullptr;
  29. bool has_evaluated_once = false;
  30. mutable int64_t t_start_us;
  31. mutable int64_t t_load_us;
  32. mutable int64_t t_p_eval_us = 0;
  33. mutable int64_t t_eval_us = 0;
  34. mutable int64_t t_compute_start_us = 0;
  35. mutable int64_t n_queued_tokens = 0;
  36. mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
  37. mutable int32_t n_eval = 0; // number of eval calls
  38. // host buffer for the model output (logits and embeddings)
  39. ggml_backend_buffer_ptr buf_output;
  40. // decode output (2-dimensional array: [n_outputs][n_vocab])
  41. size_t logits_size = 0; // capacity (of floats) for logits
  42. float * logits = nullptr;
  43. std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers
  44. size_t output_size = 0; // capacity (of tokens positions) for the output buffers
  45. int32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch
  46. bool logits_all = false;
  47. // embeddings output (2-dimensional array: [n_outputs][n_embd])
  48. // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
  49. size_t embd_size = 0; // capacity (of floats) for embeddings
  50. float * embd = nullptr;
  51. // sequence embeddings output (map of [n_embd] vectors)
  52. // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
  53. std::map<llama_seq_id, std::vector<float>> embd_seq;
  54. // whether we are computing encoder output or decoder output
  55. bool is_encoding = false;
  56. // TODO: find a better way to accommodate mutli-dimension position encoding methods
  57. // number of position id each token get, 1 for each token in most cases.
  58. // when using m-rope, it will be 3 position ids per token to representing 3 dimension coordinate.
  59. int n_pos_per_token = 1;
  60. // output of the encoder part of the encoder-decoder models
  61. std::vector<float> embd_enc;
  62. std::vector<std::set<llama_seq_id>> seq_ids_enc;
  63. // memory buffers used to evaluate the model
  64. std::vector<uint8_t> buf_compute_meta;
  65. ggml_backend_sched_ptr sched;
  66. ggml_abort_callback abort_callback = nullptr;
  67. void * abort_callback_data = nullptr;
  68. // input tensors
  69. struct ggml_tensor * inp_tokens; // I32 [n_batch]
  70. struct ggml_tensor * inp_embd; // F32 [n_embd, n_batch]
  71. struct ggml_tensor * inp_pos; // I32 [n_batch]
  72. struct ggml_tensor * inp_out_ids; // I32 [n_outputs]
  73. struct ggml_tensor * inp_KQ_mask; // F32 [kv_size, n_batch]
  74. struct ggml_tensor * inp_KQ_mask_swa; // F32 [kv_size, n_batch]
  75. struct ggml_tensor * inp_K_shift; // I32 [kv_size]
  76. struct ggml_tensor * inp_mean; // F32 [n_batch, n_batch]
  77. struct ggml_tensor * inp_cls; // I32 [n_batch]
  78. struct ggml_tensor * inp_s_copy; // I32 [kv_size]
  79. struct ggml_tensor * inp_s_mask; // F32 [1, n_kv]
  80. struct ggml_tensor * inp_s_seq; // I32 [n_kv, n_batch]
  81. struct ggml_tensor * inp_pos_bucket; // I32 [n_batch|n_kv, n_batch]
  82. struct ggml_tensor * inp_embd_enc; // F32 [n_embd, n_outputs_enc]
  83. struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]
  84. struct ggml_tensor * inp_cross_attn_state; // F32 [4, n_embd, 1061]
  85. };
  86. // TODO: make these methods of llama_context
  87. void llama_set_k_shift(struct llama_context & lctx);
  88. void llama_set_s_copy(struct llama_context & lctx);
  89. void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch);
  90. // Make sure enough space is available for outputs.
  91. // Returns max number of outputs for which space was reserved.
  92. size_t llama_output_reserve(struct llama_context & lctx, size_t n_outputs);
  93. // make the outputs have the same order they had in the user-provided batch
  94. void llama_output_reorder(struct llama_context & ctx);
  95. // For internal test use
  96. // TODO: remove
  97. const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(struct llama_context * ctx);