/**
 * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#pragma once

#include "llama-impl.h"
#include "llama-hparams.h"

#include "ggml-cpp.h"

#include <string>
#include <unordered_map>
#include <vector>
//
// llama_adapter_cvec
//

// TODO: rename to llama_adapter_cvec
struct llama_control_vector {
    std::vector<ggml_context_ptr> ctxs;
    std::vector<ggml_backend_buffer_ptr> bufs;

    std::vector<struct ggml_tensor *> tensors; // per layer

    // inclusive range of layers the control vector is applied to; -1 = disabled
    int32_t layer_start = -1;
    int32_t layer_end   = -1;

    // return the direction tensor for layer il, or nullptr if il is out of range
    struct ggml_tensor * tensor_for(int il) const;

    // add the layer's direction tensor to cur (no-op outside the active range)
    struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const;
};
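// A minimal sketch of how these two accessors are implemented in the matching
// llama-adapter.cpp (reproduced for illustration only; the definitions in the
// .cpp file are authoritative):
//
//     struct ggml_tensor * llama_control_vector::tensor_for(int il) const {
//         if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) {
//             return nullptr;
//         }
//         return tensors[il];
//     }
//
//     struct ggml_tensor * llama_control_vector::apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const {
//         struct ggml_tensor * layer_dir = tensor_for(il);
//         if (layer_dir != nullptr) {
//             cur = ggml_add(ctx, cur, layer_dir); // shift the hidden state along the control direction
//         }
//         return cur;
//     }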
// load raw control-vector data (laid out as n_embd floats per layer,
// layers concatenated) into cvec and activate it for layers
// [il_start, il_end]; returns 0 on success
int32_t llama_control_vector_apply(
        struct llama_control_vector & cvec,
        const llama_model & model,
        const float * data,
        size_t len,
        int32_t n_embd,
        int32_t il_start,
        int32_t il_end);
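// A hedged usage sketch (the loader and the layer range below are
// hypothetical, not part of this header):
//
//     std::vector<float> dirs = /* hypothetical: n_embd * n_layers floats read from a control-vector file */;
//     int32_t err = llama_control_vector_apply(cvec, model, dirs.data(), dirs.size(),
//                                              model.hparams.n_embd, /*il_start=*/10, /*il_end=*/20);
//
// Passing data == nullptr disables the currently loaded vector without
// freeing its buffers.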
//
// llama_adapter_lora
//

// TODO: rename to llama_adapter_lora_weight
struct llama_lora_weight {
    struct ggml_tensor * a = nullptr; // low-rank factor A
    struct ggml_tensor * b = nullptr; // low-rank factor B

    llama_lora_weight() = default;
    llama_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {}
};
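// For a base weight w with LoRA pair (a, b), the adapted product is
// conventionally
//
//     y = w*x + scale * (b * (a * x)),   scale = adapter_strength * alpha / rank,
//
// where rank is the shared inner dimension of a and b. A hedged ggml sketch of
// that composition (variable names here are illustrative):
//
//     struct ggml_tensor * wx = ggml_mul_mat(ctx, w, cur);
//     struct ggml_tensor * ab = ggml_mul_mat(ctx, lw->b, ggml_mul_mat(ctx, lw->a, cur));
//     cur = ggml_add(ctx, wx, ggml_scale(ctx, ab, scale));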
// TODO: rename to llama_adapter_lora
struct llama_lora_adapter {
    // map tensor name to lora_a_b
    std::unordered_map<std::string, struct llama_lora_weight> ab_map;

    std::vector<ggml_context_ptr> ctxs;
    std::vector<ggml_backend_buffer_ptr> bufs;

    float alpha; // LoRA scaling factor, read from the adapter file

    llama_lora_adapter() = default;
    ~llama_lora_adapter() = default;

    // look up the LoRA pair for base tensor w, or nullptr if w is not adapted
    llama_lora_weight * get_weight(struct ggml_tensor * w);
};
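// A minimal sketch of get_weight as implemented in the matching
// llama-adapter.cpp (illustrative; the .cpp definition is authoritative).
// The lookup is keyed on the base tensor's name:
//
//     llama_lora_weight * llama_lora_adapter::get_weight(struct ggml_tensor * w) {
//         const std::string name(w->name);
//         const auto pos = ab_map.find(name);
//         if (pos != ab_map.end()) {
//             return &pos->second;
//         }
//         return nullptr;
//     }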