0002-pretokenizer.patch 2.2 KB

(line-number gutter from the page rendering of the 41-line patch; removed as display residue)
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Michael Yang <mxyng@pm.me>
Date: Mon, 16 Sep 2024 15:53:13 -0700
Subject: [PATCH] pretokenizer

---
 src/llama.cpp | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index 4c0a1bb6..800dfb95 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -6287,16 +6287,7 @@ static void llm_load_vocab(
     if (vocab.type == LLAMA_VOCAB_TYPE_BPE) {
         vocab.tokenizer_add_space_prefix = false;
         vocab.tokenizer_clean_spaces = true;
-        if (tokenizer_pre.empty()) {
-            LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__);
-            LLAMA_LOG_WARN("%s:                                             \n", __func__);
-            LLAMA_LOG_WARN("%s: ************************************        \n", __func__);
-            LLAMA_LOG_WARN("%s: GENERATION QUALITY WILL BE DEGRADED!        \n", __func__);
-            LLAMA_LOG_WARN("%s: CONSIDER REGENERATING THE MODEL             \n", __func__);
-            LLAMA_LOG_WARN("%s: ************************************        \n", __func__);
-            LLAMA_LOG_WARN("%s:                                             \n", __func__);
-            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-        } else if (tokenizer_pre == "default") {
+        if (tokenizer_pre == "default") {
             vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
         } else if (
             tokenizer_pre == "llama3" ||
@@ -6398,7 +6389,8 @@ static void llm_load_vocab(
             vocab.tokenizer_add_bos = true;
             vocab.tokenizer_clean_spaces = false;
         } else {
-            throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
+            LLAMA_LOG_WARN("%s: missing or unrecognized pre-tokenizer type, using: 'default'\n", __func__);
+            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
         }
     } else if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
         vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;