// llama-impl.cpp
  1. /**
  2. * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
  3. *
  4. * MIT License
  5. *
  6. * Copyright (c) 2023-2024 The ggml authors
  7. *
  8. * Permission is hereby granted, free of charge, to any person obtaining a copy
  9. * of this software and associated documentation files (the "Software"), to deal
  10. * in the Software without restriction, including without limitation the rights
  11. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  12. * copies of the Software, and to permit persons to whom the Software is
  13. * furnished to do so, subject to the following conditions:
  14. *
  15. * The above copyright notice and this permission notice shall be included in all
  16. * copies or substantial portions of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  21. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  22. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  23. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  24. * SOFTWARE.
  25. */
  26. #include "llama-impl.h"
  27. #include "llama.h"
  28. #include <cinttypes>
  29. #include <climits>
  30. #include <cstdarg>
  31. #include <cstring>
  32. #include <vector>
  33. #include <sstream>
// Process-wide logging state: the callback every LLAMA_LOG_* message is
// routed through, plus the opaque user pointer handed back to it.
struct llama_logger_state {
    ggml_log_callback log_callback = llama_log_callback_default; // defaults to stderr sink
    void * log_callback_user_data = nullptr;
};

// Single translation-unit-local instance; mutated only by llama_log_set().
static llama_logger_state g_logger_state;
// Scoped timer: captures the start timestamp now (or -1 when disabled) and
// accumulates the elapsed microseconds into t_acc on destruction.
time_meas::time_meas(int64_t & t_acc, bool disable) : t_start_us(disable ? -1 : ggml_time_us()), t_acc(t_acc) {}
  40. time_meas::~time_meas() {
  41. if (t_start_us >= 0) {
  42. t_acc += ggml_time_us() - t_start_us;
  43. }
  44. }
  45. void llama_log_set(ggml_log_callback log_callback, void * user_data) {
  46. ggml_log_set(log_callback, user_data);
  47. g_logger_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
  48. g_logger_state.log_callback_user_data = user_data;
  49. }
  50. static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
  51. va_list args_copy;
  52. va_copy(args_copy, args);
  53. char buffer[128];
  54. int len = vsnprintf(buffer, 128, format, args);
  55. if (len < 128) {
  56. g_logger_state.log_callback(level, buffer, g_logger_state.log_callback_user_data);
  57. } else {
  58. char * buffer2 = new char[len + 1];
  59. vsnprintf(buffer2, len + 1, format, args_copy);
  60. buffer2[len] = 0;
  61. g_logger_state.log_callback(level, buffer2, g_logger_state.log_callback_user_data);
  62. delete[] buffer2;
  63. }
  64. va_end(args_copy);
  65. }
  66. void llama_log_internal(ggml_log_level level, const char * format, ...) {
  67. va_list args;
  68. va_start(args, format);
  69. llama_log_internal_v(level, format, args);
  70. va_end(args);
  71. }
  72. void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) {
  73. (void) level;
  74. (void) user_data;
  75. fputs(text, stderr);
  76. fflush(stderr);
  77. }
  78. void replace_all(std::string & s, const std::string & search, const std::string & replace) {
  79. if (search.empty()) {
  80. return;
  81. }
  82. std::string builder;
  83. builder.reserve(s.length());
  84. size_t pos = 0;
  85. size_t last_pos = 0;
  86. while ((pos = s.find(search, last_pos)) != std::string::npos) {
  87. builder.append(s, last_pos, pos - last_pos);
  88. builder.append(replace);
  89. last_pos = pos + search.length();
  90. }
  91. builder.append(s, last_pos, std::string::npos);
  92. s = std::move(builder);
  93. }
  94. std::string format(const char * fmt, ...) {
  95. va_list ap;
  96. va_list ap2;
  97. va_start(ap, fmt);
  98. va_copy(ap2, ap);
  99. int size = vsnprintf(NULL, 0, fmt, ap);
  100. GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
  101. std::vector<char> buf(size + 1);
  102. int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
  103. GGML_ASSERT(size2 == size);
  104. va_end(ap2);
  105. va_end(ap);
  106. return std::string(buf.data(), size);
  107. }
  108. std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
  109. char buf[256];
  110. snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0));
  111. for (size_t i = 1; i < ne.size(); i++) {
  112. snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i));
  113. }
  114. return buf;
  115. }
  116. std::string llama_format_tensor_shape(const struct ggml_tensor * t) {
  117. char buf[256];
  118. snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]);
  119. for (int i = 1; i < GGML_MAX_DIMS; i++) {
  120. snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]);
  121. }
  122. return buf;
  123. }
  124. static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
  125. switch (type) {
  126. case GGUF_TYPE_UINT8: return std::to_string(((const uint8_t *)data)[i]);
  127. case GGUF_TYPE_INT8: return std::to_string(((const int8_t *)data)[i]);
  128. case GGUF_TYPE_UINT16: return std::to_string(((const uint16_t *)data)[i]);
  129. case GGUF_TYPE_INT16: return std::to_string(((const int16_t *)data)[i]);
  130. case GGUF_TYPE_UINT32: return std::to_string(((const uint32_t *)data)[i]);
  131. case GGUF_TYPE_INT32: return std::to_string(((const int32_t *)data)[i]);
  132. case GGUF_TYPE_UINT64: return std::to_string(((const uint64_t *)data)[i]);
  133. case GGUF_TYPE_INT64: return std::to_string(((const int64_t *)data)[i]);
  134. case GGUF_TYPE_FLOAT32: return std::to_string(((const float *)data)[i]);
  135. case GGUF_TYPE_FLOAT64: return std::to_string(((const double *)data)[i]);
  136. case GGUF_TYPE_BOOL: return ((const bool *)data)[i] ? "true" : "false";
  137. default: return format("unknown type %d", type);
  138. }
  139. }
  140. std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
  141. const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
  142. switch (type) {
  143. case GGUF_TYPE_STRING:
  144. return gguf_get_val_str(ctx_gguf, i);
  145. case GGUF_TYPE_ARRAY:
  146. {
  147. const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
  148. int arr_n = gguf_get_arr_n(ctx_gguf, i);
  149. const void * data = gguf_get_arr_data(ctx_gguf, i);
  150. std::stringstream ss;
  151. ss << "[";
  152. for (int j = 0; j < arr_n; j++) {
  153. if (arr_type == GGUF_TYPE_STRING) {
  154. std::string val = gguf_get_arr_str(ctx_gguf, i, j);
  155. // escape quotes
  156. replace_all(val, "\\", "\\\\");
  157. replace_all(val, "\"", "\\\"");
  158. ss << '"' << val << '"';
  159. } else if (arr_type == GGUF_TYPE_ARRAY) {
  160. ss << "???";
  161. } else {
  162. ss << gguf_data_to_str(arr_type, data, j);
  163. }
  164. if (j < arr_n - 1) {
  165. ss << ", ";
  166. }
  167. }
  168. ss << "]";
  169. return ss.str();
  170. }
  171. default:
  172. return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);
  173. }
  174. }