- /**
- * llama.cpp - commit ba1cb19cdd0d92e012e0f6e009e0620f854b6afd - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
- #include "llama-vocab.h"
- #include "unicode.h"
- #include <algorithm>
- #include <cassert>
- #include <cfloat>
- #include <climits>
- #include <cstdarg>
- #include <cstring>
- #include <forward_list>
- #include <queue>
- #include <sstream>
- //
- // helpers
- //
- LLAMA_ATTRIBUTE_FORMAT(1, 2)
- static std::string format(const char * fmt, ...) {
- va_list ap;
- va_list ap2;
- va_start(ap, fmt);
- va_copy(ap2, ap);
- int size = vsnprintf(NULL, 0, fmt, ap);
- GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
- std::vector<char> buf(size + 1);
- int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
- GGML_ASSERT(size2 == size);
- va_end(ap2);
- va_end(ap);
- return std::string(buf.data(), size);
- }
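- // Editorial note: format() uses the classic two-pass vsnprintf idiom: the
- // first vsnprintf(NULL, 0, ...) call only measures the required length, and
- // the va_copy is mandatory because that first pass consumes `ap`, so the
- // second pass must print from the untouched copy `ap2`.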
- struct naive_trie {
- naive_trie() : has_value(false), value(0) {
- }
- void insert(const char * key, size_t len, int32_t value = 0) {
- if (len == 0) {
- this->has_value = true;
- this->value = value;
- return;
- }
- char c = key[0];
- auto res = children.find(c);
- if (res != children.end()) {
- res->second.insert(key + 1, len - 1, value);
- } else {
- auto res = children.insert(std::make_pair(c, naive_trie()));
- res.first->second.insert(key + 1, len - 1, value);
- }
- }
- std::pair<const char *, size_t> get_longest_prefix(const char * key, size_t len, size_t offset = 0) const {
- if (len == 0 || offset == len) {
- return std::make_pair(key, offset);
- }
- char c = key[offset];
- auto res = children.find(c);
- if (res != children.end()) {
- return res->second.get_longest_prefix(key, len, offset + 1);
- }
- return std::make_pair(key, offset);
- }
- const struct naive_trie * traverse(const char c) const {
- auto res = children.find(c);
- if (res != children.end()) {
- return &res->second;
- }
- return NULL;
- }
- std::map<char, struct naive_trie> children;
- bool has_value;
- llama_token value;
- };
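- // Editorial sketch (not part of the upstream file): a minimal, self-contained
- // exercise of naive_trie. All literals here are illustrative only.
- static void naive_trie_usage_example() {
-     naive_trie trie;
-     trie.insert("hell", 4, 10);
-     trie.insert("hello", 5, 11);
-     // the longest stored prefix of "hellish" is "hell", so the walk stops at offset 4
-     auto res = trie.get_longest_prefix("hellish", 7);
-     GGML_ASSERT(res.second == 4);
-     // traverse() returns the child node for a character, or NULL if absent
-     GGML_ASSERT(trie.traverse('h') != NULL);
-     GGML_ASSERT(trie.traverse('x') == NULL);
- }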
- //
- // impl
- //
- struct llm_tokenizer {
- llm_tokenizer() {}
- virtual ~llm_tokenizer() = default;
- };
- llama_vocab::~llama_vocab() {
- delete tokenizer;
- }
- int llama_vocab::find_bpe_rank(const std::string & token_left, const std::string & token_right) const {
- GGML_ASSERT(token_left.find(' ') == std::string::npos);
- GGML_ASSERT(token_left.find('\n') == std::string::npos);
- GGML_ASSERT(token_right.find(' ') == std::string::npos);
- GGML_ASSERT(token_right.find('\n') == std::string::npos);
- auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
- if (it == bpe_ranks.end()) {
- return -1;
- }
- return it->second;
- }
- static enum llama_vocab_type llama_vocab_get_type(const llama_vocab & vocab) {
- return vocab.type;
- }
- static bool llama_is_normal_token(const llama_vocab & vocab, llama_token id) {
- GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
- return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_NORMAL;
- }
- static bool llama_is_unknown_token(const llama_vocab & vocab, llama_token id) {
- GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
- return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_UNKNOWN;
- }
- static bool llama_is_control_token(const llama_vocab & vocab, llama_token id) {
- GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
- return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_CONTROL;
- }
- static bool llama_is_byte_token(const llama_vocab & vocab, llama_token id) {
- GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
- return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_BYTE;
- }
- static bool llama_is_user_defined_token(const llama_vocab & vocab, llama_token id) {
- GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
- return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_USER_DEFINED;
- }
- static bool llama_is_unused_token(const llama_vocab & vocab, llama_token id) {
- GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
- return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_UNUSED;
- }
- static uint8_t llama_token_to_byte(const llama_vocab & vocab, llama_token id) {
- GGML_ASSERT(llama_vocab_get_type(vocab) != LLAMA_VOCAB_TYPE_NONE);
- GGML_ASSERT(llama_is_byte_token(vocab, id));
- const auto & token_data = vocab.id_to_token.at(id);
- switch (llama_vocab_get_type(vocab)) {
- case LLAMA_VOCAB_TYPE_SPM:
- case LLAMA_VOCAB_TYPE_UGM: {
- auto buf = token_data.text.substr(3, 2);
- return strtol(buf.c_str(), NULL, 16);
- }
- case LLAMA_VOCAB_TYPE_BPE: {
- GGML_ABORT("fatal error");
- //return unicode_utf8_to_byte(token_data.text); // TODO: why is this here after GGML_ASSERT?
- }
- case LLAMA_VOCAB_TYPE_WPM: {
- GGML_ABORT("fatal error");
- }
- default:
- GGML_ABORT("fatal error");
- }
- }
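- // Editorial sketch (assumption: byte tokens are spelled "<0xXX>", as the
- // substr(3, 2) above implies): parsing one such token in isolation.
- static uint8_t spm_byte_token_parse_example() {
-     const std::string text = "<0x41>";                   // hypothetical byte token
-     const std::string hex_digits = text.substr(3, 2);    // -> "41"
-     return (uint8_t) strtol(hex_digits.c_str(), NULL, 16); // -> 0x41, i.e. 'A'
- }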
- static void llama_escape_whitespace(std::string & text) {
- replace_all(text, " ", "\xe2\x96\x81");
- }
- static void llama_unescape_whitespace(std::string & word) {
- replace_all(word, "\xe2\x96\x81", " ");
- }
- struct llm_symbol {
- using index = int;
- index prev;
- index next;
- const char * text;
- size_t n;
- };
- static_assert(std::is_trivially_copyable<llm_symbol>::value, "llm_symbol is not trivially copyable");
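- // Editorial note: llm_symbol is an intrusive doubly-linked list laid out over
- // a std::vector; prev/next hold vector indices and -1 marks both ends. A merge
- // grows the left symbol's byte count `n` and unlinks the right symbol by
- // zeroing its `n`, so no buffer data ever has to move.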
- //
- // SPM tokenizer
- // original implementation:
- // https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
- //
- struct llm_bigram_spm {
- struct comparator {
- bool operator()(llm_bigram_spm & l, llm_bigram_spm & r) {
- return (l.score < r.score) || (l.score == r.score && l.left > r.left);
- }
- };
- using queue_storage = std::vector<llm_bigram_spm>;
- using queue = std::priority_queue<llm_bigram_spm, queue_storage, comparator>;
- llm_symbol::index left;
- llm_symbol::index right;
- float score;
- size_t size;
- };
- struct llm_tokenizer_spm : llm_tokenizer {
- llm_tokenizer_spm(const llama_vocab & /*vocab*/) : llm_tokenizer() {}
- };
- struct llm_tokenizer_spm_session {
- llm_tokenizer_spm_session(const llama_vocab & vocab) : vocab(vocab) {}
- void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
- // split string into utf8 chars
- int index = 0;
- size_t offs = 0;
- while (offs < text.size()) {
- llm_symbol sym;
- size_t len = unicode_len_utf8(text[offs]);
- sym.text = text.c_str() + offs;
- sym.n = std::min(len, text.size() - offs);
- offs += sym.n;
- sym.prev = index - 1;
- sym.next = offs == text.size() ? -1 : index + 1;
- index++;
- symbols.emplace_back(sym);
- }
- // seed the work queue with all possible 2-character tokens.
- for (int i = 1; i < (int) symbols.size(); ++i) {
- try_add_bigram(i - 1, i);
- }
- // keep substituting the highest frequency pairs for as long as we can.
- while (!work_queue.empty()) {
- auto bigram = work_queue.top();
- work_queue.pop();
- auto & left_sym = symbols[bigram.left];
- auto & right_sym = symbols[bigram.right];
- // if one of the symbols already got merged, skip it.
- if (left_sym.n == 0 || right_sym.n == 0 ||
- left_sym.n + right_sym.n != bigram.size) {
- continue;
- }
- // merge the right sym into the left one
- left_sym.n += right_sym.n;
- right_sym.n = 0;
- //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
- // remove the right sym from the chain
- left_sym.next = right_sym.next;
- if (right_sym.next >= 0) {
- symbols[right_sym.next].prev = bigram.left;
- }
- // find more substitutions
- try_add_bigram(left_sym.prev, bigram.left);
- try_add_bigram(bigram.left, left_sym.next);
- }
- for (int i = 0; i != -1; i = symbols[i].next) {
- auto & symbol = symbols[i];
- resegment(symbol, output);
- }
- }
- private:
- void resegment(llm_symbol & symbol, std::vector<llama_vocab::id> & output) {
- auto text = std::string(symbol.text, symbol.n);
- auto token = vocab.token_to_id.find(text);
- // Do we need to support is_unused?
- if (token != vocab.token_to_id.end()) {
- output.push_back((*token).second);
- return;
- }
- const auto p = rev_merge.find(text);
- if (p == rev_merge.end()) {
- // output any symbols that did not form tokens as bytes.
- output.reserve(output.size() + symbol.n);
- for (int j = 0; j < (int)symbol.n; ++j) {
- llama_vocab::id token_id = llama_byte_to_token_impl(vocab, symbol.text[j]);
- output.push_back(token_id);
- }
- return;
- }
- resegment(symbols[p->second.first], output);
- resegment(symbols[p->second.second], output);
- }
- void try_add_bigram(int left, int right) {
- if (left == -1 || right == -1) {
- return;
- }
- const std::string text = std::string(symbols[left].text, symbols[left].n + symbols[right].n);
- auto token = vocab.token_to_id.find(text);
- if (token == vocab.token_to_id.end()) {
- return;
- }
- if (static_cast<size_t>((*token).second) >= vocab.id_to_token.size()) {
- return;
- }
- const auto & tok_data = vocab.id_to_token[(*token).second];
- llm_bigram_spm bigram;
- bigram.left = left;
- bigram.right = right;
- bigram.score = tok_data.score;
- bigram.size = text.size();
- work_queue.push(bigram);
- // Do we need to support is_unused?
- rev_merge[text] = std::make_pair(left, right);
- }
- const llama_vocab & vocab;
- // currently unused
- // const llm_tokenizer_spm * spm_tokenizer;
- std::vector<llm_symbol> symbols;
- llm_bigram_spm::queue work_queue;
- std::map<std::string, std::pair<int, int>> rev_merge;
- };
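- // Editorial walk-through (hedged, hypothetical vocab): with tokens "h", "e",
- // "l", "he" (score -2.0) and "el" (score -1.0), tokenize("hel") seeds bigrams
- // ("h","e") and ("e","l"); "el" wins on score and is merged first, ("h","el")
- // is then tried but is not in the vocab, so the output is ["h", "el"]. The
- // stale ("h","e") queue entry is skipped because its recorded size no longer
- // matches, and rev_merge lets resegment() split a merged span back apart when
- // the merged string itself never became a single vocab token.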
- //
- // BPE tokenizer
- // adapted from https://github.com/cmp-nct/ggllm.cpp [MIT License]
- // tried to simplify unicode stuff, so most likely does not work 100% correctly!
- //
- // TODO: there are a lot of common parts between spm and bpe tokenizers, should be refactored and reused
- template<typename T, typename Container = std::vector<T>, typename Compare = std::less<typename Container::value_type>>
- class llama_priority_queue : public std::priority_queue<T, Container, Compare> {
- public:
- using std::priority_queue<T, Container, Compare>::priority_queue;
- T pop_move() {
- T item = std::move(this->c.front());
- std::pop_heap(this->c.begin(), this->c.end(), this->comp);
- this->c.pop_back();
- return item;
- }
- void pop() = delete;
- };
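- // Editorial note: std::priority_queue::top() only exposes a const reference,
- // so the usual top()-then-pop() sequence copies the element, which is costly
- // here because llm_bigram_bpe carries a std::string. pop_move() extracts the
- // front element by move instead, and pop() is deleted so the copying path
- // cannot be used by accident:
- //
- //   llm_bigram_bpe::queue queue;
- //   queue.push(some_bigram);                 // some_bigram: hypothetical
- //   llm_bigram_bpe top = queue.pop_move();   // moved out, no string copy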
- struct llm_bigram_bpe {
- struct comparator {
- bool operator()(const llm_bigram_bpe & l, const llm_bigram_bpe & r) const {
- return l.rank > r.rank || (l.rank == r.rank && l.left > r.left);
- }
- };
- using queue_storage = std::vector<llm_bigram_bpe>;
- using queue = llama_priority_queue<llm_bigram_bpe, queue_storage, comparator>;
- llm_symbol::index left;
- llm_symbol::index right;
- std::string text;
- int rank;
- size_t size;
- };
- struct llm_tokenizer_bpe : llm_tokenizer {
- llm_tokenizer_bpe(const llama_vocab & vocab) : llm_tokenizer() {
- GGML_ASSERT(vocab.type == LLAMA_VOCAB_TYPE_BPE);
- switch (vocab.type_pre) {
- case LLAMA_VOCAB_PRE_TYPE_LLAMA3:
- regex_exprs = {
- // original regex from tokenizer.json
- //"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
- // adapted: https://github.com/ggerganov/llama.cpp/pull/6920#issuecomment-2080233989
- "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
- };
- break;
- case LLAMA_VOCAB_PRE_TYPE_DBRX:
- case LLAMA_VOCAB_PRE_TYPE_SMAUG:
- regex_exprs = {
- // same as llama3
- "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
- };
- break;
- case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM:
- regex_exprs = {
- "[\r\n]",
- "\\s?[A-Za-zµÀ-ÖØ-öø-ƺƼ-ƿDŽ-ʓʕ-ʯͰ-ͳͶͷͻ-ͽͿΆΈ-ΊΌΎ-ΡΣ-ϵϷ-ҁҊ-ԯԱ-ՖႠ-ჅᎠ-Ᏽᏸ-ᏽᲐ-ᲺᲽ-Ჿᴀ-ᴫᵫ-ᵷᵹ-ᶚḀ-ἕἘ-Ἕἠ-ὅὈ-Ὅὐ-ὗὙὛὝὟ-ώᾀ-ᾴᾶ-ᾼιῂ-ῄῆ-ῌῐ-ΐῖ-Ίῠ-Ῥῲ-ῴῶ-ῼℂℇℊ-ℓℕℙ-ℝℤΩℨK-ℭℯ-ℴℹℼ-ℿⅅ-ⅉⅎↃↄⰀ-ⱻⱾ-ⳤⳫ-ⳮⳲⳳꙀ-ꙭꚀ-ꚛꜢ-ꝯꝱ-ꞇꞋ-ꞎꭰ-ꮿff-stﬓ-ﬗA-Za-z\U00010400-\U0001044f𐒰-𐓓𐓘-𐓻𐲀-𐲲𐳀-𐳲𑢠-𑣟𞤀-𞥃]+",
- "\\s?[!-/:-~!-/:-~‘-‟ -。]+",
- "\\s+$",
- "[一-龥ࠀ-一가-]+",
- "\\p{N}+",
- };
- break;
- case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER:
- regex_exprs = {
- "[\r\n]",
- "\\s?\\p{L}+",
- "\\s?\\p{P}+",
- "[一-龥ࠀ-一가-]+",
- "\\p{N}",
- };
- break;
- case LLAMA_VOCAB_PRE_TYPE_FALCON:
- regex_exprs = {
- "[\\p{P}\\$\\+<=>\\^~\\|`]+",
- "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
- "[0-9][0-9][0-9]",
- };
- break;
- case LLAMA_VOCAB_PRE_TYPE_STARCODER:
- case LLAMA_VOCAB_PRE_TYPE_REFACT:
- case LLAMA_VOCAB_PRE_TYPE_COMMAND_R:
- case LLAMA_VOCAB_PRE_TYPE_SMOLLM:
- case LLAMA_VOCAB_PRE_TYPE_CODESHELL:
- case LLAMA_VOCAB_PRE_TYPE_EXAONE:
- case LLAMA_VOCAB_PRE_TYPE_MINERVA:
- regex_exprs = {
- "\\p{N}",
- "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
- };
- break;
- case LLAMA_VOCAB_PRE_TYPE_GPT2:
- case LLAMA_VOCAB_PRE_TYPE_MPT:
- case LLAMA_VOCAB_PRE_TYPE_OLMO:
- case LLAMA_VOCAB_PRE_TYPE_JAIS:
- regex_exprs = {
- "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
- };
- break;
- case LLAMA_VOCAB_PRE_TYPE_STABLELM2:
- case LLAMA_VOCAB_PRE_TYPE_QWEN2:
- regex_exprs = {
- // original regex from tokenizer.json
- // "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
- "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
- };
- break;
- case LLAMA_VOCAB_PRE_TYPE_PORO:
- case LLAMA_VOCAB_PRE_TYPE_BLOOM:
- case LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH:
- regex_exprs = {
- " ?[^(\\s|.,!?…。,、।۔،)]+",
- };
- break;
- case LLAMA_VOCAB_PRE_TYPE_CHATGLM4:
- regex_exprs = {
- "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
- };
- break;
- case LLAMA_VOCAB_PRE_TYPE_VIKING:
- regex_exprs = {
- " ?[^(\\s|.,!?…。,、।۔،)]+",
- "\\p{N}",
- };
- break;
- case LLAMA_VOCAB_PRE_TYPE_TEKKEN:
- // original regex from tokenizer.json
- // "[^\\r\\n\\p{L}\\p{N}]?[\\p{Lu}\\p{Lt}\\p{Lm}\\p{Lo}\\p{M}]*[\\p{Ll}\\p{Lm}\\p{Lo}\\p{M}]+|[^\\r\\n\\p{L}\\p{N}]?[\\p{Lu}\\p{Lt}\\p{Lm}\\p{Lo}\\p{M}]+[\\p{Ll}\\p{Lm}\\p{Lo}\\p{M}]*|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
- regex_exprs = {
- "[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))*((?=[\\p{L}])([^A-Z]))+|[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))+((?=[\\p{L}])([^A-Z]))*|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
- };
- break;
- case LLAMA_VOCAB_PRE_TYPE_CHAMELEON:
- // Note: in theory, the special token (sentinel and image token) regex_exprs below
- // are unnecessary, as they are split in `tokenizer_st_partition` anyway.
- // However, since the upstream pre-tokenizer uses them, they are also
- // included here (see https://huggingface.co/facebook/chameleon-7b).
- regex_exprs = {
- "<sentinel:[0-9]+>", // Sentinel tokens
- "(IMGIMG)((A|B|C|D|E|F|G|H|I){1,4})Z", // Image tokens
- "([\\t\\n]| | )", // directly from tokenizer.json
- "\\p{N}", // Individual digits
- "[\\p{P}!-/:-@\\[-`{-~]", // Punctuation, Isolated
- "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
- };
- break;
- default:
- // default regex for BPE tokenization pre-processing
- regex_exprs = {
- "[\\p{P}\\$\\+<=>\\^~\\|]+",
- "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
- "\\p{N}+",
- "[0-9][0-9][0-9]",
- };
- break;
- }
- }
- std::vector<std::string> regex_exprs;
- };
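- // Editorial sketch (hedged): regex_exprs are applied in order by
- // unicode_regex_split() during tokenization, before any BPE merging. For the
- // GPT-2 style pattern above, an input such as "Hello world!!" would
- // pre-split into the chunks ["Hello", " world", "!!"], and BPE merges are
- // then confined to each chunk, so no token ever crosses a chunk boundary.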
- struct llm_tokenizer_bpe_session {
- llm_tokenizer_bpe_session(const llama_vocab & vocab) : vocab(vocab),
- bpe_tokenizer(static_cast<const llm_tokenizer_bpe *>(vocab.tokenizer)) {}
- static void append(const llama_vocab::id token_id, std::vector<llama_vocab::id> & output) {
- output.push_back(token_id);
- }
- bool append_bos(std::vector<llama_vocab::id> & output) const {
- if (vocab.tokenizer_add_bos) {
- GGML_ASSERT(vocab.special_bos_id != -1);
- output.push_back(vocab.special_bos_id);
- return true;
- }
- return false;
- }
- bool append_eos(std::vector<llama_vocab::id> & output) const {
- if (vocab.tokenizer_add_eos) {
- GGML_ASSERT(vocab.special_eos_id != -1);
- output.push_back(vocab.special_eos_id);
- return true;
- }
- return false;
- }
- void check_double_bos_eos(const std::vector<llama_vocab::id> & output) const {
- if (vocab.tokenizer_add_bos && output.size() >= 2 && output[1] == vocab.special_bos_id) {
- LLAMA_LOG_WARN(
- "%s: Added a BOS token to the prompt as specified by the model but the prompt "
- "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
- "Are you sure this is what you want?\n", __FUNCTION__);
- }
- if (vocab.tokenizer_add_eos && output.size() >= 2 && *(output.end()-2) == vocab.special_eos_id) {
- LLAMA_LOG_WARN(
- "%s: Added a EOS token to the prompt as specified by the model but the prompt "
- "also ends with a EOS token. So now the final prompt ends with 2 EOS tokens. "
- "Are you sure this is what you want?\n", __FUNCTION__);
- }
- }
- void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
- int final_prev_index = -1;
- const auto word_collection = unicode_regex_split(text, bpe_tokenizer->regex_exprs);
- symbols_final.clear();
- for (const auto & word : word_collection) {
- work_queue = llm_bigram_bpe::queue();
- symbols.clear();
- int index = 0;
- size_t offset = 0;
- if (vocab.tokenizer_ignore_merges && vocab.token_to_id.find(word) != vocab.token_to_id.end()) {
- symbols.emplace_back(llm_symbol{-1, -1, word.c_str(), word.size()});
- offset = word.size();
- }
- while (offset < word.size()) {
- llm_symbol sym;
- size_t char_len = std::min(word.size() - offset, (size_t) unicode_len_utf8(word[offset]));
- sym.text = word.c_str() + offset;
- sym.n = char_len;
- offset += sym.n;
- sym.prev = index - 1;
- sym.next = offset == word.size() ? -1 : index + 1;
- index++;
- symbols.emplace_back(sym);
- }
- for (int i = 1; i < (int) symbols.size(); ++i) {
- add_new_bigram(i - 1, i);
- }
- // build token(s)
- while (!work_queue.empty()) {
- auto bigram = work_queue.pop_move();
- auto & left_symbol = symbols[bigram.left];
- auto & right_symbol = symbols[bigram.right];
- if (left_symbol.n == 0 || right_symbol.n == 0) {
- continue;
- }
- std::string left_token = std::string(left_symbol.text, left_symbol.n);
- std::string right_token = std::string(right_symbol.text, right_symbol.n);
- if (left_token + right_token != bigram.text) {
- continue; // Skip this bigram if it's outdated
- }
- // merge the right sym into the left one
- left_symbol.n += right_symbol.n;
- right_symbol.n = 0;
- // remove the right sym from the chain
- left_symbol.next = right_symbol.next;
- if (right_symbol.next >= 0) {
- symbols[right_symbol.next].prev = bigram.left;
- }
- add_new_bigram(left_symbol.prev, bigram.left); // left side of current symbol
- add_new_bigram(bigram.left, left_symbol.next); // right side of current symbol
- }
- // add the finished tokens to the final list keeping correct order for next and prev
- for (auto & sym : symbols) {
- if (sym.n > 0) {
- sym.prev = final_prev_index;
- sym.next = -1;
- if (final_prev_index != -1) {
- symbols_final[final_prev_index].next = symbols_final.size();
- }
- symbols_final.emplace_back(sym);
- final_prev_index = symbols_final.size() - 1;
- }
- }
- }
- symbols = symbols_final;
- if (!symbols.empty()) {
- for (int i = 0; i != -1; i = symbols[i].next) {
- auto & symbol = symbols[i];
- if (symbol.n == 0) {
- continue;
- }
- const std::string str = std::string(symbol.text, symbol.n);
- const auto token = vocab.token_to_id.find(str);
- if (token == vocab.token_to_id.end()) {
- for (auto j = str.begin(); j != str.end(); ++j) {
- std::string byte_str(1, *j);
- auto token_multibyte = vocab.token_to_id.find(byte_str);
- if (token_multibyte != vocab.token_to_id.end()) {
- output.push_back(token_multibyte->second);
- }
- }
- } else {
- output.push_back((*token).second);
- }
- }
- }
- }
- private:
- void add_new_bigram(int left, int right) {
- if (left == -1 || right == -1) {
- return;
- }
- std::string left_token = std::string(symbols[left].text, symbols[left].n);
- std::string right_token = std::string(symbols[right].text, symbols[right].n);
- int rank_found = -1;
- rank_found = vocab.find_bpe_rank(left_token, right_token);
- if (rank_found < 0) {
- return;
- }
- llm_bigram_bpe bigram;
- bigram.left = left;
- bigram.right = right;
- bigram.text = left_token + right_token;
- bigram.size = left_token.size() + right_token.size();
- bigram.rank = rank_found;
- work_queue.push(bigram);
- }
- const llama_vocab & vocab;
- const llm_tokenizer_bpe * bpe_tokenizer;
- std::vector<llm_symbol> symbols;
- std::vector<llm_symbol> symbols_final;
- llm_bigram_bpe::queue work_queue;
- };
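- // Editorial note: unlike the SPM tokenizer above, merge order here is driven
- // by BPE rank (a lower rank is an earlier merge rule and therefore a higher
- // priority), and bigram.text is stored so that stale queue entries can be
- // detected and skipped once one of their halves has already been merged.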
- //
- // WPM tokenizer
- //
- struct llm_tokenizer_wpm : llm_tokenizer {
- llm_tokenizer_wpm(const llama_vocab & /*vocab*/) : llm_tokenizer() {}
- };
- struct llm_tokenizer_wpm_session {
- llm_tokenizer_wpm_session(const llama_vocab & vocab) : vocab(vocab) {}
- void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
- const auto & token_map = vocab.token_to_id;
- // normalize and split by whitespace
- std::vector<std::string> words = preprocess(text);
- // bos token prepended already
- // find the longest tokens that form the words
- for (const std::string & word : words) {
- // skip empty words
- if (word.size() == 0) {
- continue;
- }
- // prepend phantom space
- const std::string word1 = "\xe2\x96\x81" + word;
- const int n = word1.size();
- const size_t current_tokens = output.size();
- // we're at the start of a new word
- // move through character position in word
- for (int i = 0; i < n; ++i) {
- // loop through possible match length
- bool match = false;
- for (int j = std::min(n, i + vocab.max_token_len + 1); j > i; j--) {
- auto it = token_map.find(word1.substr(i, j - i));
- if (it != token_map.end()) {
- output.push_back(it->second);
- match = true;
- i = j - 1;
- break;
- }
- }
- if (!match) { // discard all
- output.resize(current_tokens);
- break; // and discard next tokens
- }
- }
- // we didn't find any matches for this word
- if (current_tokens == output.size()) {
- output.push_back(vocab.special_unk_id);
- }
- }
- }
- // TODO: reduce string copies by using cpts_offs array
- static std::vector<std::string> preprocess(const std::string & text) {
- const std::vector<uint32_t> cpts_nfd = unicode_cpts_normalize_nfd(unicode_cpts_from_utf8(text));
- std::vector<std::string> words(1, "");
- for (const uint32_t cpt : cpts_nfd) {
- const auto flags = unicode_cpt_flags(cpt);
- if (flags.is_whitespace) {
- if (words.back().size()) { // finish previous word if any
- words.emplace_back();
- }
- continue;
- }
- assert (!flags.is_separator);
- if (cpt == 0 || cpt == 0xFFFD || flags.is_control) {
- continue;
- }
- const std::string s = unicode_cpt_to_utf8(unicode_tolower(cpt));
- if (flags.is_punctuation || ( cpt < 0x7F && flags.is_symbol ) || is_chinese_char(cpt)) {
- if (words.back().size()) { // finish previous word if any
- words.emplace_back();
- }
- words.back() = s; // single char word
- words.emplace_back(); // start a new word
- } else {
- words.back() += s; // append char to word
- }
- }
- if (!words.back().size()) {
- words.pop_back();
- }
- return words;
- }
- static bool is_chinese_char(uint32_t cpt) {
- return
- (cpt >= 0x04E00 && cpt <= 0x09FFF) ||
- (cpt >= 0x03400 && cpt <= 0x04DBF) ||
- (cpt >= 0x20000 && cpt <= 0x2A6DF) ||
- (cpt >= 0x2A700 && cpt <= 0x2B73F) ||
- (cpt >= 0x2B740 && cpt <= 0x2B81F) ||
- (cpt >= 0x2B920 && cpt <= 0x2CEAF) || // this should be 0x2B820 but in hf rust code it is 0x2B920
- (cpt >= 0x0F900 && cpt <= 0x0FAFF) ||
- (cpt >= 0x2F800 && cpt <= 0x2FA1F);
- //(cpt >= 0x3000 && cpt <= 0x303F) ||
- //(cpt >= 0xFF00 && cpt <= 0xFFEF);
- }
- private:
- const llama_vocab & vocab;
- // currently unused
- // const llm_tokenizer_wpm * wpm_tokenizer;
- };
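- // Editorial walk-through (hedged, hypothetical vocab): matching is greedy and
- // longest-first at each position. With tokens {"▁un", "break", "able"}, the
- // word "unbreakable" becomes "▁unbreakable" after the phantom-space prefix
- // and tokenizes as ["▁un", "break", "able"]; if any position yields no match
- // at all, the partial output is rolled back and the whole word collapses to
- // the single special_unk_id token.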
- //
- // UGM tokenizer
- //
- struct llm_tokenizer_ugm : llm_tokenizer {
- llm_tokenizer_ugm(const llama_vocab & vocab) : llm_tokenizer() {
- if (vocab.precompiled_charsmap.size() > 0) {
- size_t charsmap_offset = 0;
- // First four bytes of precompiled_charsmap contains length of binary
- // blob containing XOR-compressed compact double array (XCDA) entries
- uint32_t xcda_blob_size = *(const uint32_t *) &vocab.precompiled_charsmap[0];
- charsmap_offset += sizeof(xcda_blob_size);
- if (xcda_blob_size + charsmap_offset >= vocab.precompiled_charsmap.size()) {
- throw std::runtime_error("Index out of array bounds in precompiled charsmap!");
- }
- // Next xcda_blob_size bytes contain entries of XOR-compressed compact
- // double array (XCDA). Each entry is bit-packed into a 32-bit integer.
- xcda_array = (const uint32_t *) &vocab.precompiled_charsmap[charsmap_offset];
- xcda_array_size = xcda_blob_size / sizeof(uint32_t);
- charsmap_offset += xcda_blob_size;
- // Remaining bytes of precompiled charsmap contain null-terminated
- // replacement strings for prefixes matched by the XCDA.
- prefix_replacements = &vocab.precompiled_charsmap[charsmap_offset];
- prefix_replacements_size = vocab.precompiled_charsmap.size() - charsmap_offset;
- }
- for (unsigned int id = 0; id < vocab.id_to_token.size(); ++id) {
- const auto &token_data = vocab.id_to_token[id];
- if (llama_is_normal_token(vocab, id)) {
- min_score = std::min<float>(min_score, token_data.score);
- max_score = std::max<float>(max_score, token_data.score);
- }
- if (llama_is_normal_token(vocab, id) ||
- llama_is_user_defined_token(vocab, id) ||
- llama_is_unused_token(vocab, id)) {
- token_matcher.insert(token_data.text.data(), token_data.text.size(), id);
- }
- if (llama_is_user_defined_token(vocab, id)) {
- user_defined_token_matcher.insert(token_data.text.data(), token_data.text.size());
- }
- }
- unknown_token_score = min_score - unknown_token_score_penalty;
- }
- // escaped space symbol - U+2581 (Lower One Eighth Block)
- const std::string escaped_space = "\xE2\x96\x81";
- const char * prefix_replacements = NULL;
- size_t prefix_replacements_size = 0;
- const uint32_t * xcda_array = NULL;
- size_t xcda_array_size = 0;
- struct naive_trie user_defined_token_matcher;
- float min_score = FLT_MAX;
- float max_score = -FLT_MAX;
- float unknown_token_score_penalty = 10.0;
- float unknown_token_score;
- struct naive_trie token_matcher;
- };
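- // Editorial note: unknown_token_score is pinned 10.0 below the lowest score
- // of any normal token, so the Viterbi search in the session below only falls
- // back to the unknown token when no vocab token covers a code point at all.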
- struct llm_tokenizer_ugm_session {
- llm_tokenizer_ugm_session(const llama_vocab & vocab) : vocab(vocab),
- ugm_tokenizer(static_cast<const llm_tokenizer_ugm *>(vocab.tokenizer)) {}
- /* This implementation is based on SentencePiece optimized Viterbi algorithm for
- * unigram language models. The general idea is to:
- * - move along the input sequence in steps of one UTF code point,
- * - at each step find all possible tokenizations of the prefix by
- * traversing the tokens trie,
- * - for each tokenization store the best one so far (by higher score)
- * - use the position in sequence after given token as an index to store
- * results
- * - if there was no valid tokenization of the current UTF code point
- * then use unknown token with additional score penalty
- * After processing the whole sequence we backtrack from the end to get
- * the best tokenization.
- */
- void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
- // get current size of output (for reversal later)
- size_t output_size = output.size();
- // normalize the input first
- std::string normalized;
- normalize(text, &normalized);
- size_t input_len = normalized.size();
- if (input_len == 0) {
- return;
- }
- // initialize score_sum to -FLT_MAX so it will be always lower than sums of token scores
- std::vector<struct best_tokenization> tokenization_results(input_len + 1, {vocab.special_unk_id, 0, -FLT_MAX});
- // at the beginning tokenization score is zero
- tokenization_results[0] = { vocab.special_unk_id, 0, 0 };
- for (size_t input_offset = 0; input_offset < input_len;) {
- size_t prefix_offset = input_offset;
- // calculate how many code units are in the currently processed UTF code point
- size_t n_utf8_code_units = std::min<size_t>(unicode_len_utf8(normalized[input_offset]), input_len - input_offset);
- // traverse the token matcher trie to find a matching token
- bool single_codepoint_token_found = false;
- const struct best_tokenization & current_best = tokenization_results[input_offset];
- const struct naive_trie * node = ugm_tokenizer->token_matcher.traverse(normalized[prefix_offset++]);
- while (prefix_offset <= input_len && node != NULL) {
- // check if we found valid token in prefix
- if (node->has_value) {
- // check if it corresponds to the whole UTF code point
- if (prefix_offset - input_offset == n_utf8_code_units) {
- single_codepoint_token_found = true;
- }
- llama_token token_id = node->value;
- const auto & token_data = vocab.id_to_token[token_id];
- // we set the user-defined token scores to 0 to make them more likely to be selected
- // (normal token scores are log probabilities, so they are negative)
- // score type is double here to make tokenization results exactly
- // the same as in the HF tokenizer using SentencePiece
- const double token_score = llama_is_user_defined_token(vocab, token_id) ? 0.0 : token_data.score;
- const double challenger_score = current_best.score_sum + token_score;
- struct best_tokenization & current_champ = tokenization_results[prefix_offset];
- if (challenger_score > current_champ.score_sum) {
- struct best_tokenization challenger = { token_id, input_offset, (float) challenger_score };
- current_champ = challenger;
- }
- }
- node = node->traverse(normalized[prefix_offset++]);
- }
- // if we didn't find a valid token corresponding to the whole UTF code point
- // then use unknown token as the tokenization of this UTF code point
- if (!single_codepoint_token_found) {
- const double challenger_score = current_best.score_sum + ugm_tokenizer->unknown_token_score;
- prefix_offset = input_offset + n_utf8_code_units;
- struct best_tokenization & current_champ = tokenization_results[prefix_offset];
- if (challenger_score > current_champ.score_sum) {
- struct best_tokenization challenger = { vocab.special_unk_id, input_offset, (float) challenger_score };
- current_champ = challenger;
- }
- }
- // move to the next UTF code point
- input_offset += n_utf8_code_units;
- }
- // now backtrack from the end to gather token ids of the best tokenization
- // merge sequences of consecutive unknown tokens into single unknown tokens
- bool is_prev_unknown = false;
- for (struct best_tokenization & tokenization = tokenization_results[input_len]; ; tokenization = tokenization_results[tokenization.input_offset]) {
- bool is_unknown = tokenization.token_id == vocab.special_unk_id;
- if (!(is_prev_unknown && is_unknown)) {
- output.push_back(tokenization.token_id);
- }
- if (tokenization.input_offset == 0) {
- break;
- }
- is_prev_unknown = is_unknown;
- }
- // reverse the output since we added tokens starting from the end of the input
- std::reverse(output.begin() + output_size, output.end());
- }
- private:
- // helper structure for returning normalization results
- struct normalization_result {
- const char * normalized;
- size_t normalized_len;
- size_t consumed_input;
- };
- void normalize(const std::string& input, std::string * normalized) {
- normalized->clear();
- normalized->reserve(input.size() * 3);
- const std::string space = vocab.tokenizer_escape_whitespaces ? ugm_tokenizer->escaped_space : " ";
- bool shall_prepend_space = !vocab.tokenizer_treat_whitespace_as_suffix && vocab.tokenizer_add_space_prefix;
- bool shall_append_space = vocab.tokenizer_treat_whitespace_as_suffix && vocab.tokenizer_add_space_prefix;
- bool shall_merge_spaces = vocab.tokenizer_remove_extra_whitespaces;
- bool is_space_prepended = false;
- bool processing_non_ws = false;
- size_t input_len = input.size();
- for (size_t input_offset = 0; input_offset < input_len; ) {
- auto norm_res = normalize_prefix(input, input_offset);
- for (size_t i = 0; i < norm_res.normalized_len; i++) {
- char c = norm_res.normalized[i];
- if (c != ' ') {
- if (!processing_non_ws) {
- processing_non_ws = true;
- if ((shall_prepend_space && !is_space_prepended) || shall_merge_spaces) {
- normalized->append(space);
- is_space_prepended = true;
- }
- }
- normalized->push_back(c);
- } else {
- if (processing_non_ws) {
- processing_non_ws = false;
- }
- if (!shall_merge_spaces) {
- normalized->append(space);
- }
- }
- }
- input_offset += norm_res.consumed_input;
- }
- if (shall_append_space) {
- normalized->append(space);
- }
- }
- /*
- * This structure is a view wrapper for XOR-compressed double array (XCDA)
- * See Shunsuke Kanda (2018). Space- and Time-Efficient String Dictionaries.
- * Each bit-packed entry contains:
- * - BASE array value in bits 10-30
- * - LCHECK array value in bits 0-7
- * - LEAF array value in bit 9
- * Entries containing indexes of replacement sequences have set bit 31
- */
- struct xcda_array_view {
- public:
- xcda_array_view(const uint32_t * xcda_array, size_t xcda_array_size) : xcda_array(xcda_array), xcda_array_size(xcda_array_size) {
- }
- uint32_t get_base(size_t index) {
- uint32_t packed_node = get_node(index);
- return (packed_node >> 10) << ((packed_node & (1U << 9)) >> 6);
- }
- uint32_t get_lcheck(size_t index) {
- uint32_t packed_node = get_node(index);
- return packed_node & ((1U << 31) | 0xff);
- }
- bool get_leaf(size_t index) {
- uint32_t packed_node = get_node(index);
- return (packed_node >> 8) & 1;
- }
- uint32_t get_value(size_t index) {
- uint32_t packed_node = get_node(index);
- return packed_node & ((1U << 31) - 1);
- }
- private:
- uint32_t get_node(size_t index) {
- // valid indexes are 0 .. xcda_array_size - 1
- if (index >= xcda_array_size) {
- throw std::runtime_error("Index out of array bounds in XCDA array!");
- }
- return xcda_array[index];
- }
- const uint32_t * xcda_array;
- size_t xcda_array_size;
- };
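- // Editorial note (hedged, worked example): a packed node equal to
- // ((123u << 10) | (1u << 8) | 'A') decodes through the accessors above as
- // BASE 123 (bit 9 is clear, so no extra left-shift by 8), LEAF set, and
- // LCHECK 'A'. get_lcheck() deliberately keeps bit 31 as well, so entries
- // holding replacement-sequence indexes can never compare equal to a char.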
- // this structure stores the best tokenization so far at input_offset
- struct best_tokenization {
- llama_token token_id;
- size_t input_offset;
- float score_sum;
- };
- struct normalization_result normalize_prefix(const std::string & input, size_t input_offset) {
- if (input_offset == input.size()) {
- return { &input[input_offset], 0, 0 };
- }
- // if input prefix matches some user-defined token return this token as normalization result
- auto user_defined_token_match =
- ugm_tokenizer->user_defined_token_matcher.get_longest_prefix(&input[input_offset], input.size() - input_offset);
- if (user_defined_token_match.second > 0) {
- return { &input[input_offset], user_defined_token_match.second, user_defined_token_match.second };
- }
- size_t longest_prefix_length = 0;
- size_t longest_prefix_offset = 0;
- if (ugm_tokenizer->xcda_array_size > 0) {
- struct xcda_array_view xcda_view(ugm_tokenizer->xcda_array, ugm_tokenizer->xcda_array_size);
- // Find the longest normalized sequence matching the input prefix by walking
- // the XOR-compressed compact double array (XCDA) starting from the root node
- // We find the index of the next node by calculating BASE[s] ^ c where s is
- // the index of the previous node and c is a numerical character value
- uint32_t node_index = 0;
- // get BASE of the root node
- node_index = xcda_view.get_base(node_index);
- for (size_t prefix_offset = input_offset; prefix_offset < input.size(); prefix_offset++) {
- unsigned char c = input[prefix_offset];
- if (c == 0) {
- break;
- }
- node_index ^= c;
- // if value of LCHECK is not c it means that this is not a child of
- // the previous node, so we stop matching
- if (xcda_view.get_lcheck(node_index) != c) {
- break;
- }
- bool is_leaf = xcda_view.get_leaf(node_index);
- // get BASE of the current node
- node_index ^= xcda_view.get_base(node_index);
- // if LEAF of the current node is true, it means that its BASE points to the node
- // containing index of replacement sequence for currently matched input prefix
- if (is_leaf)
- {
- longest_prefix_length = prefix_offset - input_offset + 1;
- // get index of replacement sequence for currently matched input prefix
- longest_prefix_offset = xcda_view.get_value(node_index);
- }
- }
- }
- if (longest_prefix_length > 0) {
- // we have a match, so return the replacement sequence
- if (longest_prefix_offset >= ugm_tokenizer->prefix_replacements_size) {
- throw std::runtime_error("Index out of array bounds in precompiled charsmap!");
- }
- const char * prefix_replacement = &(ugm_tokenizer->prefix_replacements)[longest_prefix_offset];
- return { prefix_replacement, strlen(prefix_replacement), longest_prefix_length };
- }
- // check if the input prefix contains a valid sequence of UTF-8 code units
- try {
- // if yes, return this sequence unmodified
- size_t prefix_offset = input_offset;
- unicode_cpt_from_utf8(input, prefix_offset);
- return { &input[input_offset], prefix_offset - input_offset, prefix_offset - input_offset };
- } catch (std::invalid_argument & /*ex*/) {
- // if no, consume 1 byte and return U+FFFD - REPLACEMENT CHARACTER
- return { "\xEF\xBF\xBD", 3, 1 };
- }
- }
- const llama_vocab & vocab;
- const llm_tokenizer_ugm * ugm_tokenizer;
- };
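- // Editorial walk-through (hedged, hypothetical vocab): for input "abc" with
- // tokens "a"(-1.0), "b"(-1.0), "ab"(-1.5) and "c"(-1.0), the forward pass
- // stores the best score at each byte boundary; boundary 2 keeps "ab" (-1.5)
- // over "a"+"b" (-2.0), boundary 3 extends it with "c" (-2.5), and the
- // backtracking pass then emits ["ab", "c"].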
- //
- // RWKV tokenizer
- //
- static std::vector<uint8_t> llama_unescape_rwkv_token(const std::string & escaped) {
- std::vector<uint8_t> output;
- output.reserve(escaped.size());
- // Parser state
- bool escaping = false;
- uint8_t hex_remaining = 0;
- uint8_t hex_acc = 0;
- // Step through characters, performing parsing
- for (const char & c : escaped) {
- // If we're parsing a hex code, interpret the next character
- if (hex_remaining != 0) {
- uint8_t value = (c >= 'a') ? (c - 'a' + 10) : (c - '0');
- hex_acc = (hex_acc << 4) + value;
- hex_remaining -= 1;
- if (hex_remaining == 0) {
- output.push_back(hex_acc);
- hex_acc = 0;
- }
- continue;
- }
- // If we got an escape character, interpret it
- if (escaping) {
- if (c == 't') {
- output.push_back('\t');
- } else if (c == 'n') {
- output.push_back('\n');
- } else if (c == 'r') {
- output.push_back('\r');
- } else if (c == 'x') {
- hex_remaining = 2;
- } else {
- output.push_back(c);
- }
- escaping = false;
- continue;
- }
- if (c == '\\') {
- escaping = true;
- continue;
- }
- output.push_back(c);
- }
- return output;
- }
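- // Editorial sketch (hedged): the escape format handled above uses backslash
- // escapes for \t, \n, \r and \xNN hex bytes (lowercase hex digits, as the
- // parser assumes). For example, the escaped string
- //   a\x41\n
- // unescapes to the three bytes { 'a', 0x41, '\n' }.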
- struct llm_tokenizer_rwkv : llm_tokenizer {
- llm_tokenizer_rwkv(const llama_vocab & vocab) : llm_tokenizer() {
- // RWKV supports arbitrary byte tokens, but the vocab struct only supports string tokens.
- // For now, we decode the vocab here into the lookup we'll use for tokenization.
- // build trie
- for (unsigned int id = 0; id < vocab.id_to_token.size(); ++id) {
- const auto & token = vocab.id_to_token[id];
- const auto data = llama_unescape_rwkv_token(token.text);
- token_matcher.insert((const char *) data.data(), data.size(), id);
- }
- }
- struct naive_trie token_matcher;
- };
- struct llm_tokenizer_rwkv_session {
- llm_tokenizer_rwkv_session(const llama_vocab & vocab) : vocab(vocab),
- rwkv_tokenizer(static_cast<const llm_tokenizer_rwkv &>(*vocab.tokenizer)) {}
- void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
- uint32_t position = 0;
- while (position < text.size()) {
- const struct naive_trie * node = rwkv_tokenizer.token_matcher.traverse(text[position]);
- if (node == NULL) {
- // no matching token found, add unknown token
- output.push_back(vocab.special_unk_id);
- position += 1;
- continue;
- }
- // traverse the trie to find the longest matching token
- uint32_t token_id = 0;
- uint32_t token_length = 0;
- while (node != NULL) {
- if (node->has_value) {
- token_id = node->value;
- token_length = position + 1;
- }
- node = node->traverse(text[++position]);
- }
- // add the longest matching token
- output.push_back(token_id);
- position = token_length;
- }
- }
- private:
- const llama_vocab & vocab;
- const llm_tokenizer_rwkv & rwkv_tokenizer;
- };
- void llama_vocab::init_tokenizer() {
- switch (type) {
- case LLAMA_VOCAB_TYPE_SPM:
- tokenizer = new llm_tokenizer_spm(*this);
- break;
- case LLAMA_VOCAB_TYPE_BPE:
- tokenizer = new llm_tokenizer_bpe(*this);
- break;
- case LLAMA_VOCAB_TYPE_WPM:
- tokenizer = new llm_tokenizer_wpm(*this);
- break;
- case LLAMA_VOCAB_TYPE_UGM:
- tokenizer = new llm_tokenizer_ugm(*this);
- break;
- case LLAMA_VOCAB_TYPE_RWKV:
- tokenizer = new llm_tokenizer_rwkv(*this);
- break;
- default:
- GGML_ABORT("unsupported vocab type");
- }
- }
- //
- // (de-) tokenize
- //
- typedef enum FRAGMENT_BUFFER_VARIANT_TYPE {
- FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN,
- FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT
- } FRAGMENT_BUFFER_VARIANT_TYPE;
- struct fragment_buffer_variant {
- fragment_buffer_variant(llama_vocab::id _token)
- :
- type(FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN),
- token(_token),
- raw_text(_dummy),
- offset(0),
- length(0) {}
- fragment_buffer_variant(const std::string & _raw_text, int64_t _offset, int64_t _length)
- :
- type(FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT),
- token((llama_vocab::id) - 1),
- raw_text(_raw_text),
- offset(_offset),
- length(_length){
- GGML_ASSERT(_offset >= 0);
- GGML_ASSERT(_length >= 1);
- GGML_ASSERT(offset + length <= raw_text.length());
- }
- const FRAGMENT_BUFFER_VARIANT_TYPE type;
- const llama_vocab::id token;
- const std::string _dummy;
- const std::string & raw_text;
- const uint64_t offset;
- const uint64_t length;
- };
- // #define PRETOKENIZERDEBUG
- static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer, bool parse_special) {
- // for each special token
- for (const llama_vocab::id special_id : vocab.cache_special_tokens) {
- const auto & data = vocab.id_to_token[special_id];
- const auto & special_token = data.text;
- if (!parse_special && (data.attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_UNKNOWN))) {
- // Ignore control and unknown tokens when parse_special == false
- continue;
- // User-defined tokens are still pre-tokenized before everything else
- // ref: https://github.com/huggingface/tokenizers/blob/fdd26ba9a3f0c133427aab0423888cbde91362d7/tokenizers/src/tokenizer/mod.rs#L726
- // This is mostly relevant for neox-style tokenizers (mpt, olmo, stablelm, etc.)
- }
- // for each text fragment
- std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
- while (it != buffer.end()) {
- auto & fragment = (*it);
- // if a fragment is text ( not yet processed )
- if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
- const auto & raw_text = fragment.raw_text;
- auto raw_text_base_offset = fragment.offset;
- auto raw_text_base_length = fragment.length;
- // loop over the text
- while (true) {
- // find the first occurrence of a given special token in this fragment
- // passing offset argument only limit the "search area" but match coordinates
- // are still relative to the source full raw_text
- auto match = raw_text.find(special_token, raw_text_base_offset);
- // no occurrences found, stop processing this fragment for a given special token
- if (match == std::string::npos) break;
- // check if match is within bounds of offset <-> length
- if (match + special_token.length() > raw_text_base_offset + raw_text_base_length) break;
- #ifdef PRETOKENIZERDEBUG
- LLAMA_LOG_WARN("FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
- #endif
- auto source = std::distance(buffer.begin(), it);
- // if match is further than base offset
- // then we have some text to the left of it
- if (match > raw_text_base_offset) {
- // left
- const int64_t left_reminder_offset = raw_text_base_offset + 0;
- int64_t left_reminder_length = match - raw_text_base_offset;
- if (data.attr & LLAMA_TOKEN_ATTR_LSTRIP) {
- while (left_reminder_length > 0 && isspace(raw_text[left_reminder_offset + left_reminder_length - 1])) {
- left_reminder_length--;
- }
- }
- if (left_reminder_length > 0) {
- buffer.emplace_after(it, raw_text, left_reminder_offset, left_reminder_length);
- it++;
- }
- #ifdef PRETOKENIZERDEBUG
- LLAMA_LOG_WARN("FL: (%ld %ld) '%s'\n", left_reminder_offset, left_reminder_length, raw_text->substr(left_reminder_offset, left_reminder_length).c_str());
- #endif
- }
- // special token
- buffer.emplace_after(it, special_id);
- it++;
- // right
- if (match + special_token.length() < raw_text_base_offset + raw_text_base_length) {
- int64_t right_reminder_offset = match + special_token.length();
- int64_t right_reminder_length = raw_text_base_length - ((match - raw_text_base_offset) + special_token.length());
- if (data.attr & LLAMA_TOKEN_ATTR_RSTRIP) {
- while (right_reminder_length > 0 && isspace(raw_text[right_reminder_offset])) {
- right_reminder_offset++;
- right_reminder_length--;
- }
- }
- if (right_reminder_length > 0) {
- buffer.emplace_after(it, raw_text, right_reminder_offset, right_reminder_length);
- it++;
- }
- #ifdef PRETOKENIZERDEBUG
- LLAMA_LOG_WARN("FR: (%ld %ld) '%s'\n", right_reminder_offset, right_reminder_length, raw_text->substr(right_reminder_offset, right_reminder_length).c_str());
- #endif
- if (source == 0) {
- buffer.erase_after(buffer.before_begin());
- } else {
- buffer.erase_after(std::next(buffer.begin(), (source-1)));
- }
- // repeat for the right side
- raw_text_base_offset = right_reminder_offset;
- raw_text_base_length = right_reminder_length;
- #ifdef PRETOKENIZERDEBUG
- LLAMA_LOG_WARN("RR: (%ld %ld) '%s'\n", raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
- #endif
- } else {
- if (source == 0) {
- buffer.erase_after(buffer.before_begin());
- } else {
- buffer.erase_after(std::next(buffer.begin(), (source-1)));
- }
- break;
- }
- }
- }
- it++;
- }
- }
- }
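- // Editorial walk-through (hedged): given the raw text "Hi <s> there" and a
- // special token "<s>", the single RAW_TEXT fragment is partitioned in place
- // into [RAW_TEXT("Hi "), TOKEN(<s>), RAW_TEXT(" there")]; the LSTRIP/RSTRIP
- // attributes additionally consume the whitespace adjacent to each match.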
- std::vector<llama_vocab::id> llama_tokenize_internal(
- const llama_vocab & vocab,
- std::string raw_text,
- bool add_special,
- bool parse_special) {
- GGML_ASSERT(vocab.tokenizer && "Tokenizer not initialized. Call llama_vocab::init_tokenizer() first.");
- std::vector<llama_vocab::id> output;
- std::forward_list<fragment_buffer_variant> fragment_buffer;
- if (!raw_text.empty()) {
- fragment_buffer.emplace_front(raw_text, 0, raw_text.length());
- tokenizer_st_partition(vocab, fragment_buffer, parse_special);
- }
- switch (vocab.type) {
- case LLAMA_VOCAB_TYPE_SPM:
- {
- // OG tokenizer behavior:
- //
- // tokenizer.encode('', add_special_tokens=True) returns [1]
- // tokenizer.encode('', add_special_tokens=False) returns []
- bool is_prev_special = true; // prefix with space if first token
- if (add_special && vocab.tokenizer_add_bos) {
- GGML_ASSERT(vocab.special_bos_id != -1);
- output.push_back(vocab.special_bos_id);
- is_prev_special = true;
- }
- for (const auto & fragment : fragment_buffer) {
- if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
- auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
- // prefix with space if previous is special
- if (vocab.tokenizer_add_space_prefix && is_prev_special) {
- raw_text = " " + raw_text;
- }
- #ifdef PRETOKENIZERDEBUG
- LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
- #endif
- llama_escape_whitespace(raw_text);
- llm_tokenizer_spm_session session(vocab);
- session.tokenize(raw_text, output);
- is_prev_special = false;
- } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
- output.push_back(fragment.token);
- is_prev_special = true;
- }
- }
- if (add_special && vocab.tokenizer_add_bos && output.size() >= 2 && output[1] == vocab.special_bos_id) {
- LLAMA_LOG_WARN(
- "%s: Added a BOS token to the prompt as specified by the model but the prompt "
- "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
- "Are you sure this is what you want?\n", __FUNCTION__);
- }
- if (add_special && vocab.tokenizer_add_eos) {
- GGML_ASSERT(vocab.special_eos_id != -1);
- output.push_back(vocab.special_eos_id);
- }
- } break;
- case LLAMA_VOCAB_TYPE_BPE:
- {
- llm_tokenizer_bpe_session session(vocab);
- // the session calls methods that do not exist on the base llm_tokenizer,
- // so vocab.tokenizer is cast to the BPE tokenizer inside the session
- if (add_special) {
- session.append_bos(output);
- }
- for (const auto & fragment : fragment_buffer) {
- if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
- auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
- #ifdef PRETOKENIZERDEBUG
- LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
- #endif
- session.tokenize(raw_text, output);
- } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
- session.append(fragment.token, output);
- }
- }
- if (add_special) {
- session.append_eos(output);
- session.check_double_bos_eos(output);
- }
- } break;
- case LLAMA_VOCAB_TYPE_WPM:
- {
- if (add_special) {
- GGML_ASSERT(vocab.special_cls_id != -1);
- output.push_back(vocab.special_cls_id);
- }
- llm_tokenizer_wpm_session session(vocab);
- for (const auto & fragment : fragment_buffer) {
- if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
- auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
- #ifdef PRETOKENIZERDEBUG
- LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
- #endif
- session.tokenize(raw_text, output);
- } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
- output.push_back(fragment.token);
- }
- }
- if (add_special) {
- GGML_ASSERT(vocab.special_sep_id != -1);
- output.push_back(vocab.special_sep_id);
- }
- } break;
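- // NOTE: WPM (BERT-style) wraps the sequence as [CLS] ... [SEP] when
- // add_special is set, instead of the BOS/EOS pair used by the other types.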
- case LLAMA_VOCAB_TYPE_UGM:
- {
- if (add_special && vocab.tokenizer_add_bos) {
- GGML_ASSERT(vocab.special_bos_id != -1);
- output.push_back(vocab.special_bos_id);
- }
- llm_tokenizer_ugm_session session(vocab);
- for (const auto & fragment : fragment_buffer) {
- if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
- auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
- #ifdef PRETOKENIZERDEBUG
- LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
- #endif
- session.tokenize(raw_text, output);
- } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
- output.push_back(fragment.token);
- }
- }
- if (add_special && vocab.tokenizer_add_bos && output.size() >= 2 && output[1] == vocab.special_bos_id) {
- LLAMA_LOG_WARN(
- "%s: Added a BOS token to the prompt as specified by the model but the prompt "
- "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
- "Are you sure this is what you want?\n", __FUNCTION__);
- }
- if (add_special && vocab.tokenizer_add_eos) {
- GGML_ASSERT(vocab.special_eos_id != -1);
- output.push_back(vocab.special_eos_id);
- }
- } break;
- case LLAMA_VOCAB_TYPE_RWKV:
- {
- llm_tokenizer_rwkv_session session(vocab);
- for (const auto & fragment : fragment_buffer) {
- if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
- auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
- #ifdef PRETOKENIZERDEBUG
- LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
- #endif
- session.tokenize(raw_text, output);
- } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
- output.push_back(fragment.token);
- }
- }
- } break;
- case LLAMA_VOCAB_TYPE_NONE:
- GGML_ABORT("fatal error");
- }
- return output;
- }
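- // Usage sketch (illustrative only, not part of the public API surface):
- //   std::vector<llama_vocab::id> toks = llama_tokenize_internal(
- //       vocab, "Hello world", /*add_special=*/true, /*parse_special=*/false);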
- llama_token llama_byte_to_token_impl(const llama_vocab & vocab, uint8_t ch) {
- GGML_ASSERT(llama_vocab_get_type(vocab) != LLAMA_VOCAB_TYPE_NONE);
- static const char * hex = "0123456789ABCDEF";
- switch (llama_vocab_get_type(vocab)) {
- case LLAMA_VOCAB_TYPE_SPM:
- case LLAMA_VOCAB_TYPE_UGM: {
- const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
- auto token = vocab.token_to_id.find(buf);
- if (token != vocab.token_to_id.end()) {
- return (*token).second;
- }
- // Try to fall back to just the byte as a string
- const char buf2[2] = { (char)ch, 0 };
- return vocab.token_to_id.at(buf2);
- }
- case LLAMA_VOCAB_TYPE_WPM:
- case LLAMA_VOCAB_TYPE_BPE: {
- return vocab.token_to_id.at(unicode_byte_to_utf8(ch));
- }
- default:
- GGML_ABORT("fatal error");
- }
- }
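- // Example (illustrative): for ch == 0x41 ('A'), SPM/UGM first look up the
- // literal token "<0x41>" and fall back to the one-byte string "A" if it is
- // absent; WPM/BPE look up the UTF-8 string from unicode_byte_to_utf8(0x41).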
- const char * llama_token_get_text_impl(const struct llama_vocab & vocab, llama_token token) {
- GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
- return vocab.id_to_token[token].text.c_str();
- }
- float llama_token_get_score_impl(const struct llama_vocab & vocab, llama_token token) {
- GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
- return vocab.id_to_token[token].score;
- }
- llama_token_attr llama_token_get_attr_impl(const struct llama_vocab & vocab, llama_token token) {
- GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
- return vocab.id_to_token[token].attr;
- }
- bool llama_token_is_eog_impl(const struct llama_vocab & vocab, llama_token token) {
- return token != -1 && vocab.special_eog_ids.count(token) > 0;
- }
- bool llama_token_is_control_impl(const struct llama_vocab & vocab, llama_token token) {
- return llama_is_control_token(vocab, token);
- }
- llama_token llama_token_bos_impl(const struct llama_vocab & vocab) {
- return vocab.special_bos_id;
- }
- llama_token llama_token_eos_impl(const struct llama_vocab & vocab) {
- return vocab.special_eos_id;
- }
- llama_token llama_token_eot_impl(const struct llama_vocab & vocab) {
- return vocab.special_eot_id;
- }
- llama_token llama_token_eom_impl(const struct llama_vocab & vocab) {
- return vocab.special_eom_id;
- }
- llama_token llama_token_cls_impl(const struct llama_vocab & vocab) {
- return vocab.special_cls_id;
- }
- llama_token llama_token_sep_impl(const struct llama_vocab & vocab) {
- return vocab.special_sep_id;
- }
- llama_token llama_token_nl_impl(const struct llama_vocab & vocab) {
- return vocab.linefeed_id;
- }
- llama_token llama_token_pad_impl(const struct llama_vocab & vocab) {
- return vocab.special_pad_id;
- }
- bool llama_add_bos_token_impl(const struct llama_vocab & vocab) {
- return vocab.tokenizer_add_bos;
- }
- bool llama_add_eos_token_impl(const struct llama_vocab & vocab) {
- return vocab.tokenizer_add_eos;
- }
- llama_token llama_token_prefix_impl(const struct llama_vocab & vocab) {
- return vocab.special_fim_pre_id;
- }
- llama_token llama_token_middle_impl(const struct llama_vocab & vocab) {
- return vocab.special_fim_mid_id;
- }
- llama_token llama_token_suffix_impl(const struct llama_vocab & vocab) {
- return vocab.special_fim_suf_id;
- }
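- // NOTE: the three prefix/middle/suffix accessors above appear to be legacy
- // aliases; they return the same special_fim_* ids as the fim_* accessors below.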
- llama_token llama_token_fim_pre_impl(const struct llama_vocab & vocab) {
- return vocab.special_fim_pre_id;
- }
- llama_token llama_token_fim_suf_impl(const struct llama_vocab & vocab) {
- return vocab.special_fim_suf_id;
- }
- llama_token llama_token_fim_mid_impl(const struct llama_vocab & vocab) {
- return vocab.special_fim_mid_id;
- }
- llama_token llama_token_fim_pad_impl(const struct llama_vocab & vocab) {
- return vocab.special_fim_pad_id;
- }
- llama_token llama_token_fim_rep_impl(const struct llama_vocab & vocab) {
- return vocab.special_fim_rep_id;
- }
- llama_token llama_token_fim_sep_impl(const struct llama_vocab & vocab) {
- return vocab.special_fim_sep_id;
- }
- int32_t llama_tokenize_impl(
- const struct llama_vocab & vocab,
- const char * text,
- int32_t text_len,
- llama_token * tokens,
- int32_t n_tokens_max,
- bool add_special,
- bool parse_special) {
- auto res = llama_tokenize_internal(vocab, std::string(text, text_len), add_special, parse_special);
- if (n_tokens_max < (int) res.size()) {
- // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
- return -((int) res.size());
- }
- for (size_t i = 0; i < res.size(); i++) {
- tokens[i] = res[i];
- }
- return res.size();
- }
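- // Caller-side sketch (assumed typical two-call pattern, with buf being a
- // std::vector<llama_token>): a negative return is the required count, so
- // resize and retry:
- //   int32_t n = llama_tokenize_impl(vocab, s, len, buf.data(), (int32_t) buf.size(), true, false);
- //   if (n < 0) { buf.resize(-n); n = llama_tokenize_impl(vocab, s, len, buf.data(), (int32_t) buf.size(), true, false); }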
- static std::string llama_decode_text(const std::string & text) {
- std::string decoded_text;
- const auto cpts = unicode_cpts_from_utf8(text);
- for (const auto cpt : cpts) {
- const auto utf8 = unicode_cpt_to_utf8(cpt);
- try {
- decoded_text += unicode_utf8_to_byte(utf8);
- } catch (const std::out_of_range & /*e*/) {
- decoded_text += "[UNK_BYTE_0x";
- for (const auto c : utf8) {
- decoded_text += format("%02x", (uint8_t) c);
- }
- decoded_text += text + "]";
- }
- }
- return decoded_text;
- }
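- // Example (illustrative): under the GPT-2 style byte-to-unicode mapping,
- // the codepoint U+0120 ("Ġ") decodes back to a plain space byte (0x20);
- // codepoints outside the mapping are emitted as "[UNK_BYTE_0x..]" markers.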
- // does not write null-terminator to buf
- int32_t llama_token_to_piece_impl(const struct llama_vocab & vocab, llama_token token, char * buf, int32_t length, int32_t lstrip, bool special) {
- // ref: https://github.com/ggerganov/llama.cpp/pull/7587#discussion_r1620983843
- static const int attr_special = LLAMA_TOKEN_ATTR_UNKNOWN | LLAMA_TOKEN_ATTR_CONTROL;
- const llama_token_attr attr = llama_token_get_attr_impl(vocab, token);
- if (!special && (attr & attr_special)) {
- return 0;
- }
- // copy piece chars to output text buffer
- // skip up to 'lstrip' leading spaces before copying
- auto _try_copy = [=] (const char * token, size_t size) -> int32_t {
- for (int32_t i = 0; i < lstrip && size && *token == ' '; ++i) {
- token++;
- size--;
- }
- if (length < (int32_t)size) {
- return -(int32_t) size;
- }
- memcpy(buf, token, size);
- return (int32_t) size;
- };
- // if we have a cache - use it
- {
- const auto & cache = vocab.cache_token_to_piece;
- if (!cache.empty()) {
- const auto & result = cache.at(token);
- return _try_copy(result.data(), result.size());
- }
- }
- if (0 <= token && token < (int32_t) vocab.id_to_token.size()) {
- const std::string & token_text = vocab.id_to_token[token].text;
- switch (llama_vocab_get_type(vocab)) {
- case LLAMA_VOCAB_TYPE_WPM:
- case LLAMA_VOCAB_TYPE_SPM:
- case LLAMA_VOCAB_TYPE_UGM: {
- // NOTE: tokens of unsupported types are not an error here;
- // they are suppressed in the same way as CONTROL tokens.
- if (attr & (attr_special | LLAMA_TOKEN_ATTR_USER_DEFINED)) {
- return _try_copy(token_text.data(), token_text.size());
- }
- if (attr & LLAMA_TOKEN_ATTR_NORMAL) {
- std::string result = token_text;
- llama_unescape_whitespace(result);
- return _try_copy(result.data(), result.size());
- }
- if (attr & LLAMA_TOKEN_ATTR_BYTE) {
- char byte = (char) llama_token_to_byte(vocab, token);
- return _try_copy((char*) &byte, 1);
- }
- break;
- }
- case LLAMA_VOCAB_TYPE_BPE: {
- // NOTE: tokens of unsupported types are not an error here;
- // they are suppressed in the same way as CONTROL tokens.
- if (attr & (attr_special | LLAMA_TOKEN_ATTR_USER_DEFINED)) {
- return _try_copy(token_text.data(), token_text.size());
- }
- if (attr & LLAMA_TOKEN_ATTR_NORMAL) {
- std::string result = llama_decode_text(token_text);
- return _try_copy(result.data(), result.size());
- }
- break;
- }
- case LLAMA_VOCAB_TYPE_RWKV: {
- std::vector<uint8_t> result = llama_unescape_rwkv_token(token_text);
- // If we don't have enough space, return an error
- if (result.size() > (size_t)length) {
- return -(int)result.size();
- }
- memcpy(buf, result.data(), result.size());
- return (int)result.size();
- }
- default:
- GGML_ABORT("fatal error");
- }
- }
- return 0;
- }
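- // Caller-side sketch (assumed typical pattern), growing the buffer when the
- // negative required size is returned:
- //   std::string piece(8, '\0');
- //   int32_t n = llama_token_to_piece_impl(vocab, tok, &piece[0], (int32_t) piece.size(), 0, true);
- //   if (n < 0) { piece.resize(-n); n = llama_token_to_piece_impl(vocab, tok, &piece[0], (int32_t) piece.size(), 0, true); }
- //   piece.resize(n);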
- int32_t llama_detokenize_impl(
- const struct llama_vocab & vocab,
- const llama_token * tokens,
- int32_t n_tokens,
- char * text,
- int32_t text_len_max,
- bool remove_special,
- bool unparse_special) {
- GGML_ASSERT(vocab.tokenizer && "Tokenizer not initialized. Call llama_vocab::init_tokenizer() first.");
- int32_t avail = text_len_max;
- int32_t total = 0;
- // remove the leading space
- bool remove_space = vocab.tokenizer_add_space_prefix;
- if (remove_special && vocab.tokenizer_add_bos) {
- if (n_tokens > 0 && tokens[0] == vocab.special_bos_id) {
- remove_space = false;
- n_tokens--;
- tokens++;
- }
- }
- if (remove_special && vocab.tokenizer_add_eos) {
- if (n_tokens > 0 && tokens[n_tokens-1] == vocab.special_eos_id) {
- n_tokens--;
- }
- }
- for (int32_t i = 0; i < n_tokens; ++i) {
- GGML_ASSERT(avail >= 0);
- int32_t n_chars = llama_token_to_piece_impl(vocab, tokens[i], text, avail, remove_space, unparse_special);
- remove_space = false;
- if (n_chars < 0) {
- avail = 0;
- total -= n_chars;
- } else if (n_chars > 0) {
- avail -= n_chars;
- text += n_chars;
- total += n_chars;
- }
- }
- if (total > text_len_max) {
- return -total;
- }
- if (vocab.tokenizer_clean_spaces) {
- text -= total; // restart text
- // first pass: characters ?!., //TODO: where do these characters come from?
- const int32_t total1 = total;
- total = total ? 1 : 0;
- for (int32_t i = 1; i < total1; ++i) {
- const char x = text[i];
- if (text[i - 1] == ' ') {
- if (x == '?' || x == '!' || x == '.' || x == ',') { // " ?", " !", " .", " ,"
- total--; // remove space
- }
- }
- text[total++] = x;
- }
- // second pass: strip single apostrophe between spaces
- const int32_t total2 = total;
- total = total ? 1 : 0;
- for (int32_t i = 1; i < total2; ++i) {
- const char x = text[i];
- if (x == '\'' && i + 1 < total2 && text[i - 1] == ' ' && text[i + 1] == ' ') { // " ' "
- total--; // remove prev space
- text[++i] = '\0'; // remove next space
- }
- text[total++] = x;
- }
- // third pass: apostrophe contractions //NOTE: does this make sense?
- const int32_t total3 = total;
- total = total ? 1 : 0;
- for (int32_t i = 1; i < total3; ++i) {
- const char x = text[i];
- if (text[i - 1] == ' ') {
- if (x == '\'' && i + 1 < total3) {
- const char x1 = text[i + 1];
- if (x1 == 't' || x1 == 'd') { // " 't", " 'd"
- //total--; // remove space
- } else if (x1 == 's' || x1 == 'm') { // " 's", " 'm"
- total--; // remove space
- } else if (i + 2 < total3) {
- const char x2 = text[i + 2];
- if ((x1 == 'l' && x2 == 'l')) { // " 'll"
- //total--; // remove space
- } else if ((x1 == 'r' && x2 == 'e') || (x1 == 'v' && x2 == 'e')) { // " 're", " 've"
- total--; // remove space
- } else {
- //total--; // remove space
- }
- } else {
- //total--; // remove space
- }
- }
- }
- text[total++] = x;
- }
- }
- return total <= text_len_max ? total : -total;
- }
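- // Illustrative example of the clean-spaces passes above: the raw piece
- // concatenation "Hello , world ! It 's fine ." is rewritten to
- // "Hello, world! It's fine." (pass 1 drops the spaces before ?!., and
- // pass 3 drops the space before 's / 'm / 're / 've contractions).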
- std::string llama_detokenize(const struct llama_vocab & vocab, const std::vector<llama_token> & tokens, bool special) {
- std::string text;
- text.resize(std::max(text.capacity(), tokens.size()));
- int32_t n_chars = llama_detokenize_impl(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
- if (n_chars < 0) {
- text.resize(-n_chars);
- n_chars = llama_detokenize_impl(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
- GGML_ASSERT(n_chars <= (int32_t)text.size()); // whitespace trimming is performed after per-token detokenization
- }
- text.resize(n_chars);
- // NOTE: the original tokenizer decodes bytes after collecting the pieces.
- return text;
- }
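- // Usage sketch (illustrative):
- //   std::string s = llama_detokenize(vocab, tokens, /*special=*/false);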