sampling_ext.cpp

// TODO: this is a temporary wrapper to allow calling C++ code from CGo
#include "sampling.h"
#include "sampling_ext.h"
#include "json-schema-to-grammar.h"
#include "llama.h"
#include "llama-model.h"
#include "llama-model-loader.h"

// Build a common_sampler from the flat C parameter struct passed in from CGo.
// Returns nullptr if initialization throws.
struct common_sampler *common_sampler_cinit(const struct llama_model *model, struct common_sampler_cparams *params) {
    try {
        common_params_sampling sparams;
        sparams.top_k = params->top_k;
        sparams.top_p = params->top_p;
        sparams.min_p = params->min_p;
        sparams.typ_p = params->typical_p;
        sparams.temp = params->temp;
        sparams.penalty_last_n = params->penalty_last_n;
        sparams.penalty_repeat = params->penalty_repeat;
        sparams.penalty_freq = params->penalty_freq;
        sparams.penalty_present = params->penalty_present;
        sparams.mirostat = params->mirostat;
        sparams.mirostat_tau = params->mirostat_tau;
        sparams.mirostat_eta = params->mirostat_eta;
        sparams.seed = params->seed;
        sparams.grammar = params->grammar;
        sparams.xtc_probability = 0.0;
        sparams.xtc_threshold = 0.5;
        return common_sampler_init(model, sparams);
    } catch (const std::exception &err) {
        return nullptr;
    }
}

void common_sampler_cfree(struct common_sampler *sampler) {
    common_sampler_free(sampler);
}

void common_sampler_creset(struct common_sampler *sampler) {
    common_sampler_reset(sampler);
}

void common_sampler_caccept(struct common_sampler *sampler, llama_token id, bool apply_grammar) {
    common_sampler_accept(sampler, id, apply_grammar);
}

llama_token common_sampler_csample(struct common_sampler *sampler, struct llama_context *ctx, int idx) {
    return common_sampler_sample(sampler, ctx, idx);
}
// Convert a JSON schema string into a GBNF grammar, copying the result into the
// caller-provided buffer (truncating if needed). Returns the number of bytes
// written, or 0 on error.
int schema_to_grammar(const char *json_schema, char *grammar, size_t max_len) {
    try {
        nlohmann::ordered_json schema = nlohmann::ordered_json::parse(json_schema);
        std::string grammar_str = json_schema_to_grammar(schema);
        size_t len = grammar_str.length();
        if (len >= max_len) {
            len = max_len - 1;
        }
        strncpy(grammar, grammar_str.c_str(), len);
        grammar[len] = '\0';
        return len;
    } catch (const std::exception &e) {
        strncpy(grammar, "", max_len - 1);
        return 0;
    }
}
// Load only the vocabulary from a GGUF file. Returns nullptr on failure.
struct llama_vocab * llama_load_vocab_from_file(const char * fname) {
    llama_vocab * vocab = new llama_vocab();
    try {
        const auto kv = LLM_KV(LLM_ARCH_UNKNOWN);
        std::vector<std::string> splits = {};
        llama_model_loader ml(std::string(fname), splits, false, false, nullptr);
        vocab->load(ml, kv);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
        delete vocab;
        return nullptr;
    }
    return vocab;
}
void llama_free_vocab(struct llama_vocab * vocab) {
    delete vocab;
}
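
For reference, a minimal sketch of how these wrappers might be exercised from C++; the buffer size, schema string, and file path below are placeholders, not values taken from this file:

// Hypothetical usage sketch -- not part of sampling_ext.cpp.
char grammar_buf[4096] = {0};  // caller-owned output buffer (size is arbitrary)
int n = schema_to_grammar("{\"type\": \"object\"}", grammar_buf, sizeof(grammar_buf));
if (n > 0) {
    // grammar_buf now holds the GBNF grammar derived from the schema
}

struct llama_vocab *vocab = llama_load_vocab_from_file("model.gguf");  // placeholder path
if (vocab != nullptr) {
    // ... inspect or tokenize with the vocabulary ...
    llama_free_vocab(vocab);
}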