cui-llama.rn 1.2.6 → 1.3.0

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/README.md +3 -2
  2. package/android/src/main/CMakeLists.txt +20 -5
  3. package/android/src/main/java/com/rnllama/LlamaContext.java +115 -27
  4. package/android/src/main/java/com/rnllama/RNLlama.java +40 -7
  5. package/android/src/main/jni.cpp +222 -34
  6. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +9 -4
  7. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +9 -4
  8. package/cpp/common.cpp +1682 -2114
  9. package/cpp/common.h +600 -613
  10. package/cpp/ggml-aarch64.c +129 -3478
  11. package/cpp/ggml-aarch64.h +19 -39
  12. package/cpp/ggml-alloc.c +1040 -1040
  13. package/cpp/ggml-alloc.h +76 -76
  14. package/cpp/ggml-backend-impl.h +216 -216
  15. package/cpp/ggml-backend-reg.cpp +195 -0
  16. package/cpp/ggml-backend.cpp +1997 -2661
  17. package/cpp/ggml-backend.h +328 -314
  18. package/cpp/ggml-common.h +1853 -1853
  19. package/cpp/ggml-cpp.h +38 -38
  20. package/cpp/ggml-cpu-aarch64.c +3560 -0
  21. package/cpp/ggml-cpu-aarch64.h +30 -0
  22. package/cpp/ggml-cpu-impl.h +371 -614
  23. package/cpp/ggml-cpu-quants.c +10822 -0
  24. package/cpp/ggml-cpu-quants.h +63 -0
  25. package/cpp/ggml-cpu.c +13975 -13720
  26. package/cpp/ggml-cpu.cpp +663 -0
  27. package/cpp/ggml-cpu.h +177 -150
  28. package/cpp/ggml-impl.h +550 -296
  29. package/cpp/ggml-metal.h +66 -66
  30. package/cpp/ggml-metal.m +4294 -3933
  31. package/cpp/ggml-quants.c +5247 -15739
  32. package/cpp/ggml-quants.h +100 -147
  33. package/cpp/ggml-threading.cpp +12 -0
  34. package/cpp/ggml-threading.h +12 -0
  35. package/cpp/ggml.c +8180 -8390
  36. package/cpp/ggml.h +2411 -2441
  37. package/cpp/llama-grammar.cpp +1138 -1138
  38. package/cpp/llama-grammar.h +144 -144
  39. package/cpp/llama-impl.h +181 -181
  40. package/cpp/llama-sampling.cpp +2348 -2345
  41. package/cpp/llama-sampling.h +48 -48
  42. package/cpp/llama-vocab.cpp +1984 -1984
  43. package/cpp/llama-vocab.h +170 -170
  44. package/cpp/llama.cpp +22132 -22046
  45. package/cpp/llama.h +1253 -1255
  46. package/cpp/log.cpp +401 -401
  47. package/cpp/log.h +121 -121
  48. package/cpp/rn-llama.hpp +83 -19
  49. package/cpp/sampling.cpp +466 -466
  50. package/cpp/sgemm.cpp +1884 -1276
  51. package/ios/RNLlama.mm +43 -20
  52. package/ios/RNLlamaContext.h +9 -3
  53. package/ios/RNLlamaContext.mm +133 -33
  54. package/jest/mock.js +0 -1
  55. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  56. package/lib/commonjs/index.js +52 -15
  57. package/lib/commonjs/index.js.map +1 -1
  58. package/lib/module/NativeRNLlama.js.map +1 -1
  59. package/lib/module/index.js +51 -15
  60. package/lib/module/index.js.map +1 -1
  61. package/lib/typescript/NativeRNLlama.d.ts +29 -5
  62. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  63. package/lib/typescript/index.d.ts +12 -5
  64. package/lib/typescript/index.d.ts.map +1 -1
  65. package/package.json +1 -1
  66. package/src/NativeRNLlama.ts +41 -6
  67. package/src/index.ts +82 -27
  68. package/cpp/json-schema-to-grammar.cpp +0 -1045
  69. package/cpp/json-schema-to-grammar.h +0 -8
  70. package/cpp/json.hpp +0 -24766
@@ -1,1984 +1,1984 @@
- #include "llama-vocab.h"
-
- #include "unicode.h"
-
- #include <algorithm>
- #include <cassert>
- #include <cfloat>
- #include <climits>
- #include <cstdarg>
- #include <cstring>
- #include <forward_list>
- #include <queue>
- #include <sstream>
-
- //
- // helpers
- //
-
- LLAMA_ATTRIBUTE_FORMAT(1, 2)
- static std::string format(const char * fmt, ...) {
-     va_list ap;
-     va_list ap2;
-     va_start(ap, fmt);
-     va_copy(ap2, ap);
-     int size = vsnprintf(NULL, 0, fmt, ap);
-     LM_GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
-     std::vector<char> buf(size + 1);
-     int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
-     LM_GGML_ASSERT(size2 == size);
-     va_end(ap2);
-     va_end(ap);
-     return std::string(buf.data(), size);
- }
-
- struct naive_trie {
-     naive_trie() : has_value(false), value(0) {
-     }
-     void insert(const char * key, size_t len, int32_t value = 0) {
-         if (len == 0) {
-             this->has_value = true;
-             this->value = value;
-             return;
-         }
-         char c = key[0];
-         auto res = children.find(c);
-         if (res != children.end()) {
-             res->second.insert(key + 1, len - 1, value);
-         } else {
-             auto res = children.insert(std::make_pair(c, naive_trie()));
-             res.first->second.insert(key + 1, len - 1, value);
-         }
-     }
-     std::pair<const char *, size_t> get_longest_prefix(const char * key, size_t len, size_t offset = 0) const {
-         if (len == 0 || offset == len) {
-             return std::make_pair(key, offset);
-         }
-         char c = key[offset];
-         auto res = children.find(c);
-         if (res != children.end()) {
-             return res->second.get_longest_prefix(key, len, offset + 1);
-         }
-
-         return std::make_pair(key, offset);
-     }
-     const struct naive_trie * traverse(const char c) const {
-         auto res = children.find(c);
-         if (res != children.end()) {
-             return &res->second;
-         }
-
-         return NULL;
-     }
-     std::map<char, struct naive_trie> children;
-     bool has_value;
-     llama_token value;
- };
-
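
The `naive_trie` above is the longest-prefix matcher that both the UGM and RWKV tokenizers later in this file drive byte by byte. A condensed, self-contained sketch of how it behaves (hypothetical keys and token ids; `llama_token` is aliased locally so the sketch compiles on its own):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <map>
    #include <utility>

    using llama_token = int32_t; // assumption: matches the typedef in llama.h

    // condensed copy of the naive_trie from the diff above
    struct naive_trie {
        naive_trie() : has_value(false), value(0) {}
        void insert(const char * key, size_t len, int32_t v = 0) {
            if (len == 0) { has_value = true; value = v; return; }
            children[key[0]].insert(key + 1, len - 1, v); // operator[] default-constructs missing children
        }
        std::pair<const char *, size_t> get_longest_prefix(const char * key, size_t len, size_t offset = 0) const {
            if (len == 0 || offset == len) return {key, offset};
            auto res = children.find(key[offset]);
            return res != children.end() ? res->second.get_longest_prefix(key, len, offset + 1)
                                         : std::make_pair(key, offset);
        }
        const naive_trie * traverse(char c) const {
            auto res = children.find(c);
            return res != children.end() ? &res->second : nullptr;
        }
        std::map<char, naive_trie> children;
        bool has_value;
        llama_token value;
    };

    int main() {
        naive_trie trie;
        trie.insert("ab", 2, 10);
        trie.insert("abc", 3, 11);

        // walks the longest path stored in the trie: 3 bytes of "abcd"
        auto res = trie.get_longest_prefix("abcd", strlen("abcd"));
        printf("matched %zu bytes\n", res.second);

        // byte-by-byte traversal, the way the UGM and RWKV sessions use it
        const naive_trie * node = trie.traverse('a');
        node = node ? node->traverse('b') : nullptr;
        if (node && node->has_value) printf("token id %d\n", node->value); // -> 10
    }
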
- //
- // impl
- //
-
- struct llm_tokenizer {
-     llm_tokenizer() {}
-     virtual ~llm_tokenizer() = default;
- };
-
- llama_vocab::~llama_vocab() {
-     delete tokenizer;
- }
-
- int llama_vocab::find_bpe_rank(const std::string & token_left, const std::string & token_right) const {
-     LM_GGML_ASSERT(token_left.find(' ') == std::string::npos);
-     LM_GGML_ASSERT(token_left.find('\n') == std::string::npos);
-     LM_GGML_ASSERT(token_right.find(' ') == std::string::npos);
-     LM_GGML_ASSERT(token_right.find('\n') == std::string::npos);
-
-     auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
-     if (it == bpe_ranks.end()) {
-         return -1;
-     }
-
-     return it->second;
- }
-
- static enum llama_vocab_type llama_vocab_get_type(const llama_vocab & vocab) {
-     return vocab.type;
- }
-
- static bool llama_is_normal_token(const llama_vocab & vocab, llama_token id) {
-     LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
-     return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_NORMAL;
- }
-
- static bool llama_is_unknown_token(const llama_vocab & vocab, llama_token id) {
-     LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
-     return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_UNKNOWN;
- }
-
- static bool llama_is_control_token(const llama_vocab & vocab, llama_token id) {
-     LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
-     return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_CONTROL;
- }
-
- static bool llama_is_byte_token(const llama_vocab & vocab, llama_token id) {
-     LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
-     return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_BYTE;
- }
-
- static bool llama_is_user_defined_token(const llama_vocab & vocab, llama_token id) {
-     LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
-     return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_USER_DEFINED;
- }
-
- static bool llama_is_unused_token(const llama_vocab & vocab, llama_token id) {
-     LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
-     return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_UNUSED;
- }
-
- static uint8_t llama_token_to_byte(const llama_vocab & vocab, llama_token id) {
-     LM_GGML_ASSERT(llama_vocab_get_type(vocab) != LLAMA_VOCAB_TYPE_NONE);
-     LM_GGML_ASSERT(llama_is_byte_token(vocab, id));
-     const auto & token_data = vocab.id_to_token.at(id);
-     switch (llama_vocab_get_type(vocab)) {
-         case LLAMA_VOCAB_TYPE_SPM:
-         case LLAMA_VOCAB_TYPE_UGM: {
-             auto buf = token_data.text.substr(3, 2);
-             return strtol(buf.c_str(), NULL, 16);
-         }
-         case LLAMA_VOCAB_TYPE_BPE: {
-             LM_GGML_ABORT("fatal error");
-             //return unicode_utf8_to_byte(token_data.text); // TODO: why is this here after LM_GGML_ASSERT?
-         }
-         case LLAMA_VOCAB_TYPE_WPM: {
-             LM_GGML_ABORT("fatal error");
-         }
-         default:
-             LM_GGML_ABORT("fatal error");
-     }
- }
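
For SPM and UGM vocabs a byte token's text has the fixed form `<0xXX>`, which is why `llama_token_to_byte` above takes `substr(3, 2)` and parses it as hex. A minimal check of that parsing step (hypothetical token text):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <string>

    int main() {
        const std::string token_text = "<0x41>";          // SPM-style byte token
        const std::string hex = token_text.substr(3, 2);  // "41"
        const uint8_t byte = (uint8_t) strtol(hex.c_str(), nullptr, 16);
        printf("%s -> 0x%02X ('%c')\n", token_text.c_str(), byte, byte); // -> 'A'
    }
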
-
- static void llama_escape_whitespace(std::string & text) {
-     replace_all(text, " ", "\xe2\x96\x81");
- }
-
- static void llama_unescape_whitespace(std::string & word) {
-     replace_all(word, "\xe2\x96\x81", " ");
- }
-
- struct llm_symbol {
-     using index = int;
-     index prev;
-     index next;
-     const char * text;
-     size_t n;
- };
-
- static_assert(std::is_trivially_copyable<llm_symbol>::value, "llm_symbol is not trivially copyable");
-
- //
- // SPM tokenizer
- // original implementation:
- // https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
- //
-
- struct llm_bigram_spm {
-     struct comparator {
-         bool operator()(llm_bigram_spm & l, llm_bigram_spm & r) {
-             return (l.score < r.score) || (l.score == r.score && l.left > r.left);
-         }
-     };
-     using queue_storage = std::vector<llm_bigram_spm>;
-     using queue = std::priority_queue<llm_bigram_spm, queue_storage, comparator>;
-     llm_symbol::index left;
-     llm_symbol::index right;
-     float score;
-     size_t size;
- };
-
- struct llm_tokenizer_spm : llm_tokenizer {
-     llm_tokenizer_spm(const llama_vocab & /*vocab*/) : llm_tokenizer() {}
- };
-
- struct llm_tokenizer_spm_session {
-     llm_tokenizer_spm_session(const llama_vocab & vocab) : vocab(vocab) {}
-
-     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
-
-         // split string into utf8 chars
-         int index = 0;
-         size_t offs = 0;
-         while (offs < text.size()) {
-             llm_symbol sym;
-             size_t len = unicode_len_utf8(text[offs]);
-             sym.text = text.c_str() + offs;
-             sym.n = std::min(len, text.size() - offs);
-             offs += sym.n;
-             sym.prev = index - 1;
-             sym.next = offs == text.size() ? -1 : index + 1;
-             index++;
-             symbols.emplace_back(sym);
-         }
-
-         // seed the work queue with all possible 2-character tokens.
-         for (int i = 1; i < (int) symbols.size(); ++i) {
-             try_add_bigram(i - 1, i);
-         }
-
-         // keep substituting the highest frequency pairs for as long as we can.
-         while (!work_queue.empty()) {
-             auto bigram = work_queue.top();
-             work_queue.pop();
-
-             auto & left_sym = symbols[bigram.left];
-             auto & right_sym = symbols[bigram.right];
-
-             // if one of the symbols already got merged, skip it.
-             if (left_sym.n == 0 || right_sym.n == 0 ||
-                 left_sym.n + right_sym.n != bigram.size) {
-                 continue;
-             }
-
-             // merge the right sym into the left one
-             left_sym.n += right_sym.n;
-             right_sym.n = 0;
-
-             //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
-
-             // remove the right sym from the chain
-             left_sym.next = right_sym.next;
-             if (right_sym.next >= 0) {
-                 symbols[right_sym.next].prev = bigram.left;
-             }
-
-             // find more substitutions
-             try_add_bigram(left_sym.prev, bigram.left);
-             try_add_bigram(bigram.left, left_sym.next);
-         }
-
-         for (int i = 0; i != -1; i = symbols[i].next) {
-             auto & symbol = symbols[i];
-             resegment(symbol, output);
-         }
-     }
-
- private:
-     void resegment(llm_symbol & symbol, std::vector<llama_vocab::id> & output) {
-         auto text = std::string(symbol.text, symbol.n);
-         auto token = vocab.token_to_id.find(text);
-
-         // Do we need to support is_unused?
-         if (token != vocab.token_to_id.end()) {
-             output.push_back((*token).second);
-             return;
-         }
-
-         const auto p = rev_merge.find(text);
-
-         if (p == rev_merge.end()) {
-             // output any symbols that did not form tokens as bytes.
-             output.reserve(output.size() + symbol.n);
-             for (int j = 0; j < (int)symbol.n; ++j) {
-                 llama_vocab::id token_id = llama_byte_to_token_impl(vocab, symbol.text[j]);
-                 output.push_back(token_id);
-             }
-             return;
-         }
-
-         resegment(symbols[p->second.first], output);
-         resegment(symbols[p->second.second], output);
-     }
-
-     void try_add_bigram(int left, int right) {
-         if (left == -1 || right == -1) {
-             return;
-         }
-         const std::string text = std::string(symbols[left].text, symbols[left].n + symbols[right].n);
-         auto token = vocab.token_to_id.find(text);
-
-         if (token == vocab.token_to_id.end()) {
-             return;
-         }
-
-         if (static_cast<size_t>((*token).second) >= vocab.id_to_token.size()) {
-             return;
-         }
-
-         const auto & tok_data = vocab.id_to_token[(*token).second];
-
-         llm_bigram_spm bigram;
-         bigram.left = left;
-         bigram.right = right;
-         bigram.score = tok_data.score;
-         bigram.size = text.size();
-
-         work_queue.push(bigram);
-
-         // Do we need to support is_unused?
-         rev_merge[text] = std::make_pair(left, right);
-     }
-
-     const llama_vocab & vocab;
-     // currently unused
-     // const llm_tokenizer_spm * spm_tokenizer;
-
-     std::vector<llm_symbol> symbols;
-     llm_bigram_spm::queue work_queue;
-     std::map<std::string, std::pair<int, int>> rev_merge;
- };
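
The SPM merge loop is easier to follow on a toy input. This standalone sketch mirrors the queue discipline above over a hypothetical score table (it keeps the staleness check but omits `rev_merge` resegmentation and the real comparator's left-index tie-break):

    #include <cstdio>
    #include <map>
    #include <queue>
    #include <string>
    #include <vector>

    struct sym { int prev, next; std::string text; };
    struct bigram {
        int left, right; float score; size_t size;
        bool operator<(const bigram & o) const { return score < o.score; } // highest score on top
    };

    int main() {
        // hypothetical SentencePiece scores: higher (less negative) merges first
        std::map<std::string, float> scores = { {"ab", -1.0f}, {"bc", -2.0f} };
        std::vector<sym> syms = { {-1, 1, "a"}, {0, 2, "b"}, {1, -1, "c"} };

        std::priority_queue<bigram> q;
        auto try_add = [&](int l, int r) {
            if (l < 0 || r < 0) return;
            std::string t = syms[l].text + syms[r].text;
            auto it = scores.find(t);
            if (it != scores.end()) q.push({l, r, it->second, t.size()});
        };
        for (int i = 1; i < (int) syms.size(); ++i) try_add(i - 1, i);

        while (!q.empty()) {
            bigram b = q.top(); q.pop();
            sym & l = syms[b.left];
            sym & r = syms[b.right];
            // skip entries invalidated by an earlier merge
            if (l.text.empty() || r.text.empty() || l.text.size() + r.text.size() != b.size) continue;
            l.text += r.text;                      // merge right into left
            r.text.clear();
            l.next = r.next;
            if (r.next >= 0) syms[r.next].prev = b.left;
            try_add(l.prev, b.left);
            try_add(b.left, l.next);
        }
        for (int i = 0; i != -1; i = syms[i].next) printf("'%s' ", syms[i].text.c_str()); // -> 'ab' 'c'
        printf("\n");
    }
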
-
- //
- // BPE tokenizer
- // adapted from https://github.com/cmp-nct/ggllm.cpp [MIT License]
- // tried to simplify unicode stuff, so most likely does not work 100% correctly!
- //
-
- // TODO: there are a lot of common parts between spm and bpe tokenizers, should be refactored and reused
-
- template<typename T, typename Container = std::vector<T>, typename Compare = std::less<typename Container::value_type>>
- class llama_priority_queue : public std::priority_queue<T, Container, Compare> {
- public:
-     using std::priority_queue<T, Container, Compare>::priority_queue;
-
-     T pop_move() {
-         T item = std::move(this->c.front());
-         std::pop_heap(this->c.begin(), this->c.end(), this->comp);
-         this->c.pop_back();
-         return item;
-     }
-
-     void pop() = delete;
- };
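
`std::priority_queue` only exposes `top()` (a const reference) plus `pop()`, so extracting an element that owns a `std::string`, as `llm_bigram_bpe` below does, normally costs a copy. `pop_move()` steals the top element instead, and deleting `pop()` forces callers onto the cheaper path. A minimal sketch of the same idiom:

    #include <algorithm>
    #include <cstdio>
    #include <queue>
    #include <string>
    #include <vector>

    template <typename T, typename C = std::vector<T>, typename Cmp = std::less<typename C::value_type>>
    class movable_pq : public std::priority_queue<T, C, Cmp> {
    public:
        using std::priority_queue<T, C, Cmp>::priority_queue;
        T pop_move() {
            T item = std::move(this->c.front());                 // move the top element out
            std::pop_heap(this->c.begin(), this->c.end(), this->comp);
            this->c.pop_back();                                  // drop the moved-from slot
            return item;
        }
        void pop() = delete;                                     // force use of pop_move()
    };

    int main() {
        movable_pq<std::string> q;
        q.push("bigram-a");
        q.push("bigram-b");
        std::string top = q.pop_move(); // moved, not copied
        printf("%s\n", top.c_str());    // -> bigram-b
    }
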
-
- struct llm_bigram_bpe {
-     struct comparator {
-         bool operator()(const llm_bigram_bpe & l, const llm_bigram_bpe & r) const {
-             return l.rank > r.rank || (l.rank == r.rank && l.left > r.left);
-         }
-     };
-
-     using queue_storage = std::vector<llm_bigram_bpe>;
-     using queue = llama_priority_queue<llm_bigram_bpe, queue_storage, comparator>;
-     llm_symbol::index left;
-     llm_symbol::index right;
-     std::string text;
-     int rank;
-     size_t size;
- };
-
- struct llm_tokenizer_bpe : llm_tokenizer {
-     llm_tokenizer_bpe(const llama_vocab & vocab) : llm_tokenizer() {
-         LM_GGML_ASSERT(vocab.type == LLAMA_VOCAB_TYPE_BPE);
-         switch (vocab.type_pre) {
-             case LLAMA_VOCAB_PRE_TYPE_LLAMA3:
-                 regex_exprs = {
-                     // original regex from tokenizer.json
-                     //"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
-
-                     // adapted: https://github.com/ggerganov/llama.cpp/pull/6920#issuecomment-2080233989
-                     "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
-                 };
-                 break;
-             case LLAMA_VOCAB_PRE_TYPE_DBRX:
-             case LLAMA_VOCAB_PRE_TYPE_SMAUG:
-                 regex_exprs = {
-                     // same as llama3
-                     "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
-                 };
-                 break;
-             case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM:
-                 regex_exprs = {
-                     "[\r\n]",
-                     "\\s?[A-Za-zµÀ-ÖØ-öø-ƺƼ-ƿDŽ-ʓʕ-ʯͰ-ͳͶͷͻ-ͽͿΆΈ-ΊΌΎ-ΡΣ-ϵϷ-ҁҊ-ԯԱ-ՖႠ-ჅᎠ-Ᏽᏸ-ᏽᲐ-ᲺᲽ-Ჿᴀ-ᴫᵫ-ᵷᵹ-ᶚḀ-ἕἘ-Ἕἠ-ὅὈ-Ὅὐ-ὗὙὛὝὟ-ώᾀ-ᾴᾶ-ᾼιῂ-ῄῆ-ῌῐ-ΐῖ-Ίῠ-Ῥῲ-ῴῶ-ῼℂℇℊ-ℓℕℙ-ℝℤΩℨK-ℭℯ-ℴℹℼ-ℿⅅ-ⅉⅎↃↄⰀ-ⱻⱾ-ⳤⳫ-ⳮⳲⳳꙀ-ꙭꚀ-ꚛꜢ-ꝯꝱ-ꞇꞋ-ꞎꭰ-ꮿff-stﬓ-ﬗA-Za-z𐐀-𐑏𐒰-𐓓𐓘-𐓻𐲀-𐲲𐳀-𐳲𑢠-𑣟𞤀-𞥃]+",
-                     "\\s?[!-/:-~!-/:-~‘-‟ -。]+",
-                     "\\s+$",
-                     "[一-龥ࠀ-一가-퟿]+",
-                     "\\p{N}+",
-                 };
-                 break;
-             case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER:
-                 regex_exprs = {
-                     "[\r\n]",
-                     "\\s?\\p{L}+",
-                     "\\s?\\p{P}+",
-                     "[一-龥ࠀ-一가-퟿]+",
-                     "\\p{N}",
-                 };
-                 break;
-             case LLAMA_VOCAB_PRE_TYPE_FALCON:
-                 regex_exprs = {
-                     "[\\p{P}\\$\\+<=>\\^~\\|`]+",
-                     "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
-                     "[0-9][0-9][0-9]",
-                 };
-                 break;
-             case LLAMA_VOCAB_PRE_TYPE_STARCODER:
-             case LLAMA_VOCAB_PRE_TYPE_REFACT:
-             case LLAMA_VOCAB_PRE_TYPE_COMMAND_R:
-             case LLAMA_VOCAB_PRE_TYPE_SMOLLM:
-             case LLAMA_VOCAB_PRE_TYPE_CODESHELL:
-             case LLAMA_VOCAB_PRE_TYPE_EXAONE:
-                 regex_exprs = {
-                     "\\p{N}",
-                     "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
-                 };
-                 break;
-             case LLAMA_VOCAB_PRE_TYPE_GPT2:
-             case LLAMA_VOCAB_PRE_TYPE_MPT:
-             case LLAMA_VOCAB_PRE_TYPE_OLMO:
-             case LLAMA_VOCAB_PRE_TYPE_JAIS:
-                 regex_exprs = {
-                     "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
-                 };
-                 break;
-             case LLAMA_VOCAB_PRE_TYPE_STABLELM2:
-             case LLAMA_VOCAB_PRE_TYPE_QWEN2:
-                 regex_exprs = {
-                     // original regex from tokenizer.json
-                     // "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
-                     "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
-                 };
-                 break;
-             case LLAMA_VOCAB_PRE_TYPE_PORO:
-             case LLAMA_VOCAB_PRE_TYPE_BLOOM:
-             case LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH:
-                 regex_exprs = {
-                     " ?[^(\\s|.,!?…。,、।۔،)]+",
-                 };
-                 break;
-             case LLAMA_VOCAB_PRE_TYPE_CHATGLM4:
-                 regex_exprs = {
-                     "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
-                 };
-                 break;
-             case LLAMA_VOCAB_PRE_TYPE_VIKING:
-                 regex_exprs = {
-                     " ?[^(\\s|.,!?…。,、।۔،)]+",
-                     "\\p{N}",
-                 };
-                 break;
-             case LLAMA_VOCAB_PRE_TYPE_TEKKEN:
-                 // original regex from tokenizer.json
-                 // "[^\\r\\n\\p{L}\\p{N}]?[\\p{Lu}\\p{Lt}\\p{Lm}\\p{Lo}\\p{M}]*[\\p{Ll}\\p{Lm}\\p{Lo}\\p{M}]+|[^\\r\\n\\p{L}\\p{N}]?[\\p{Lu}\\p{Lt}\\p{Lm}\\p{Lo}\\p{M}]+[\\p{Ll}\\p{Lm}\\p{Lo}\\p{M}]*|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
-                 regex_exprs = {
-                     "[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))*((?=[\\p{L}])([^A-Z]))+|[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))+((?=[\\p{L}])([^A-Z]))*|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
-                 };
-                 break;
-             case LLAMA_VOCAB_PRE_TYPE_CHAMELEON:
-                 // Note: in theory, the special token (sentinel and image token) regex_exprs below
-                 // are unnecessary, as they are split in `tokenizer_st_partition` anyway.
-                 // However, since the upstream pre-tokenizer uses them, they are also
-                 // included here (see https://huggingface.co/facebook/chameleon-7b).
-                 regex_exprs = {
-                     "<sentinel:[0-9]+>", // Sentinel tokens
-                     "(IMGIMG)((A|B|C|D|E|F|G|H|I){1,4})Z", // Image tokens
-                     "([\\t\\n]| | )", // directly from tokenizer.json
-                     "\\p{N}", // Individual digits
-                     "[\\p{P}!-/:-@\\[-`{-~]", // Punctuation, Isolated
-                     "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
-                 };
-                 break;
-             default:
-                 // default regex for BPE tokenization pre-processing
-                 regex_exprs = {
-                     "[\\p{P}\\$\\+<=>\\^~\\|]+",
-                     "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
-                     "\\p{N}+",
-                     "[0-9][0-9][0-9]",
-                 };
-                 break;
-         }
-     }
-
-     std::vector<std::string> regex_exprs;
- };
-
- struct llm_tokenizer_bpe_session {
-     llm_tokenizer_bpe_session(const llama_vocab & vocab) : vocab(vocab),
-         bpe_tokenizer(static_cast<const llm_tokenizer_bpe *>(vocab.tokenizer)) {}
-
-     static void append(const llama_vocab::id token_id, std::vector<llama_vocab::id> & output) {
-         output.push_back(token_id);
-     }
-
-     bool append_bos(std::vector<llama_vocab::id> & output) const {
-         if (vocab.tokenizer_add_bos) {
-             LM_GGML_ASSERT(vocab.special_bos_id != -1);
-             output.push_back(vocab.special_bos_id);
-             return true;
-         }
-         return false;
-     }
-
-     bool append_eos(std::vector<llama_vocab::id> & output) const {
-         if (vocab.tokenizer_add_eos) {
-             LM_GGML_ASSERT(vocab.special_eos_id != -1);
-             output.push_back(vocab.special_eos_id);
-             return true;
-         }
-         return false;
-     }
-
-     void check_double_bos_eos(const std::vector<llama_vocab::id> & output) const {
-         if (vocab.tokenizer_add_bos && output.size() >= 2 && output[1] == vocab.special_bos_id) {
-             LLAMA_LOG_WARN(
-                 "%s: Added a BOS token to the prompt as specified by the model but the prompt "
-                 "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
-                 "Are you sure this is what you want?\n", __FUNCTION__);
-         }
-         if (vocab.tokenizer_add_eos && output.size() >= 2 && *(output.end()-2) == vocab.special_eos_id) {
-             LLAMA_LOG_WARN(
-                 "%s: Added a EOS token to the prompt as specified by the model but the prompt "
-                 "also ends with a EOS token. So now the final prompt ends with 2 EOS tokens. "
-                 "Are you sure this is what you want?\n", __FUNCTION__);
-         }
-     }
-
-     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
-         int final_prev_index = -1;
-         const auto word_collection = unicode_regex_split(text, bpe_tokenizer->regex_exprs);
-
-         symbols_final.clear();
-
-         for (const auto & word : word_collection) {
-             work_queue = llm_bigram_bpe::queue();
-             symbols.clear();
-
-             int index = 0;
-             size_t offset = 0;
-
-             if (vocab.tokenizer_ignore_merges && vocab.token_to_id.find(word) != vocab.token_to_id.end()) {
-                 symbols.emplace_back(llm_symbol{-1, -1, word.c_str(), word.size()});
-                 offset = word.size();
-             }
-
-             while (offset < word.size()) {
-                 llm_symbol sym;
-                 size_t char_len = std::min(word.size() - offset, (size_t) unicode_len_utf8(word[offset]));
-                 sym.text = word.c_str() + offset;
-                 sym.n = char_len;
-                 offset += sym.n;
-                 sym.prev = index - 1;
-                 sym.next = offset == word.size() ? -1 : index + 1;
-                 index++;
-                 symbols.emplace_back(sym);
-             }
-             for (int i = 1; i < (int) symbols.size(); ++i) {
-                 add_new_bigram(i - 1, i);
-             }
-
-             // build token(s)
-             while (!work_queue.empty()) {
-                 auto bigram = work_queue.pop_move();
-
-                 auto & left_symbol = symbols[bigram.left];
-                 auto & right_symbol = symbols[bigram.right];
-
-                 if (left_symbol.n == 0 || right_symbol.n == 0) {
-                     continue;
-                 }
-                 std::string left_token = std::string(left_symbol.text, left_symbol.n);
-                 std::string right_token = std::string(right_symbol.text, right_symbol.n);
-                 if (left_token + right_token != bigram.text) {
-                     continue; // Skip this bigram if it's outdated
-                 }
-
-                 // merge the right sym into the left one
-                 left_symbol.n += right_symbol.n;
-                 right_symbol.n = 0;
-
-                 // remove the right sym from the chain
-                 left_symbol.next = right_symbol.next;
-                 if (right_symbol.next >= 0) {
-                     symbols[right_symbol.next].prev = bigram.left;
-                 }
-
-                 add_new_bigram(left_symbol.prev, bigram.left); // left side of current symbol
-                 add_new_bigram(bigram.left, left_symbol.next); // right side of current symbol
-             }
-
-             // add the finished tokens to the final list keeping correct order for next and prev
-             for (auto & sym : symbols) {
-                 if (sym.n > 0) {
-                     sym.prev = final_prev_index;
-                     sym.next = -1;
-                     if (final_prev_index != -1) {
-                         symbols_final[final_prev_index].next = symbols_final.size();
-                     }
-                     symbols_final.emplace_back(sym);
-                     final_prev_index = symbols_final.size() - 1;
-                 }
-             }
-         }
-
-         symbols = symbols_final;
-
-         if (!symbols.empty()) {
-             for (int i = 0; i != -1; i = symbols[i].next) {
-                 auto & symbol = symbols[i];
-                 if (symbol.n == 0) {
-                     continue;
-                 }
-
-                 const std::string str = std::string(symbol.text, symbol.n);
-                 const auto token = vocab.token_to_id.find(str);
-
-                 if (token == vocab.token_to_id.end()) {
-                     for (auto j = str.begin(); j != str.end(); ++j) {
-                         std::string byte_str(1, *j);
-                         auto token_multibyte = vocab.token_to_id.find(byte_str);
-                         if (token_multibyte != vocab.token_to_id.end()) {
-                             output.push_back(token_multibyte->second);
-                         }
-                     }
-                 } else {
-                     output.push_back((*token).second);
-                 }
-             }
-         }
-     }
-
- private:
-     void add_new_bigram(int left, int right) {
-         if (left == -1 || right == -1) {
-             return;
-         }
-         std::string left_token = std::string(symbols[left].text, symbols[left].n);
-         std::string right_token = std::string(symbols[right].text, symbols[right].n);
-
-         int rank_found = -1;
-
-         rank_found = vocab.find_bpe_rank(left_token, right_token);
-
-         if (rank_found < 0) {
-             return;
-         }
-
-         llm_bigram_bpe bigram;
-
-         bigram.left = left;
-         bigram.right = right;
-         bigram.text = left_token + right_token;
-         bigram.size = left_token.size() + right_token.size();
-         bigram.rank = rank_found;
-
-         work_queue.push(bigram);
-     }
-
-     const llama_vocab & vocab;
-     const llm_tokenizer_bpe * bpe_tokenizer;
-
-     std::vector<llm_symbol> symbols;
-     std::vector<llm_symbol> symbols_final;
-     llm_bigram_bpe::queue work_queue;
- };
-
- //
- // WPM tokenizer
- //
-
- struct llm_tokenizer_wpm : llm_tokenizer {
-     llm_tokenizer_wpm(const llama_vocab & /*vocab*/) : llm_tokenizer() {}
- };
-
- struct llm_tokenizer_wpm_session {
-     llm_tokenizer_wpm_session(const llama_vocab & vocab) : vocab(vocab) {}
-
-     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
-         const auto & token_map = vocab.token_to_id;
-         // normalize and split by whitespace
-         std::vector<std::string> words = preprocess(text);
-         // bos token prepended already
-
-         // find the longest tokens that form the words
-         for (const std::string & word : words) {
-             // skip empty words
-             if (word.size() == 0) {
-                 continue;
-             }
-
-             // prepend phantom space
-             const std::string word1 = "\xe2\x96\x81" + word;
-             const int n = word1.size();
-
-             const size_t current_tokens = output.size();
-
-             // we're at the start of a new word
-             // move through character position in word
-             for (int i = 0; i < n; ++i) {
-                 // loop through possible match length
-                 bool match = false;
-                 for (int j = std::min(n, i + vocab.max_token_len + 1); j > i; j--) {
-                     auto it = token_map.find(word1.substr(i, j - i));
-                     if (it != token_map.end()) {
-                         output.push_back(it->second);
-                         match = true;
-                         i = j - 1;
-                         break;
-                     }
-                 }
-
-                 if (!match) { // discard all
-                     output.resize(current_tokens);
-                     break; // and discard next tokens
-                 }
-             }
-
-             // we didn't find any matches for this word
-             if (current_tokens == output.size()) {
-                 output.push_back(vocab.special_unk_id);
-             }
-         }
-     }
-
-     // TODO: reduce string copies by using cpts_offs array
-     static std::vector<std::string> preprocess(const std::string & text) {
-         const std::vector<uint32_t> cpts_nfd = unicode_cpts_normalize_nfd(unicode_cpts_from_utf8(text));
-         std::vector<std::string> words(1, "");
-
-         for (const uint32_t cpt : cpts_nfd) {
-             const auto flags = unicode_cpt_flags(cpt);
-
-             if (flags.is_whitespace) {
-                 if (words.back().size()) { // finish previous word if any
-                     words.emplace_back();
-                 }
-                 continue;
-             }
-
-             assert (!flags.is_separator);
-             if (cpt == 0 || cpt == 0xFFFD || flags.is_control) {
-                 continue;
-             }
-
-             const std::string s = unicode_cpt_to_utf8(unicode_tolower(cpt));
-             if (flags.is_punctuation || ( cpt < 0x7F && flags.is_symbol ) || is_chinese_char(cpt)) {
-                 if (words.back().size()) { // finish previous word if any
-                     words.emplace_back();
-                 }
-                 words.back() = s; // single char word
-                 words.emplace_back(); // start a new word
-             } else {
-                 words.back() += s; // append char to word
-             }
-         }
-
-         if (!words.back().size()) {
-             words.pop_back();
-         }
-
-         return words;
-     }
-
-     static bool is_chinese_char(uint32_t cpt) {
-         return
-             (cpt >= 0x04E00 && cpt <= 0x09FFF) ||
-             (cpt >= 0x03400 && cpt <= 0x04DBF) ||
-             (cpt >= 0x20000 && cpt <= 0x2A6DF) ||
-             (cpt >= 0x2A700 && cpt <= 0x2B73F) ||
-             (cpt >= 0x2B740 && cpt <= 0x2B81F) ||
-             (cpt >= 0x2B920 && cpt <= 0x2CEAF) || // this should be 0x2B820 but in hf rust code it is 0x2B920
-             (cpt >= 0x0F900 && cpt <= 0x0FAFF) ||
-             (cpt >= 0x2F800 && cpt <= 0x2FA1F);
-             //(cpt >= 0x3000 && cpt <= 0x303F) ||
-             //(cpt >= 0xFF00 && cpt <= 0xFFEF);
-     }
-
- private:
-     const llama_vocab & vocab;
-     // currently unused
-     // const llm_tokenizer_wpm * wpm_tokenizer;
- };
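
The WPM session above is greedy longest-match-first WordPiece: at each position it tries the longest candidate substring allowed by `max_token_len`, and if any position fails to match it discards the whole word and emits the unknown token. A compact standalone sketch against a hypothetical three-entry vocab:

    #include <algorithm>
    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
        // hypothetical vocab; the first entry carries the phantom space \xe2\x96\x81
        std::map<std::string, int> vocab = { {"\xe2\x96\x81" "un", 1}, {"aff", 2}, {"able", 3} };
        const int max_token_len = 16;
        const int unk_id = 0;

        const std::string word1 = "\xe2\x96\x81" "unaffable"; // word with phantom space prepended
        const int n = (int) word1.size();

        std::vector<int> out;
        const size_t before = out.size();
        for (int i = 0; i < n; ++i) {
            bool match = false;
            for (int j = std::min(n, i + max_token_len + 1); j > i; j--) { // longest first
                auto it = vocab.find(word1.substr(i, j - i));
                if (it != vocab.end()) { out.push_back(it->second); match = true; i = j - 1; break; }
            }
            if (!match) { out.resize(before); break; } // one miss discards the whole word
        }
        if (out.size() == before) out.push_back(unk_id);

        for (int id : out) printf("%d ", id); // -> 1 2 3
        printf("\n");
    }
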
-
- //
- // UGM tokenizer
- //
-
- struct llm_tokenizer_ugm : llm_tokenizer {
-     llm_tokenizer_ugm(const llama_vocab & vocab) : llm_tokenizer() {
-         if (vocab.precompiled_charsmap.size() > 0) {
-             size_t charsmap_offset = 0;
-
-             // First four bytes of precompiled_charsmap contains length of binary
-             // blob containing XOR-compressed compact double array (XCDA) entries
-             uint32_t xcda_blob_size = *(const uint32_t *) &vocab.precompiled_charsmap[0];
-             charsmap_offset += sizeof(xcda_blob_size);
-             if (xcda_blob_size + charsmap_offset >= vocab.precompiled_charsmap.size()) {
-                 throw std::runtime_error("Index out of array bounds in precompiled charsmap!");
-             }
-
-             // Next xcda_blob_size bytes contain entries of XOR-compressed compact
-             // double array (XCDA). Each entry is bit-packed into a 32-bit integer.
-             xcda_array = (const uint32_t *) &vocab.precompiled_charsmap[charsmap_offset];
-             xcda_array_size = xcda_blob_size / sizeof(uint32_t);
-             charsmap_offset += xcda_blob_size;
-
-             // Remaining bytes of precompiled charsmap contain null-terminated
-             // replacement strings for prefixes matched by the XCDA.
-             prefix_replacements = &vocab.precompiled_charsmap[charsmap_offset];
-             prefix_replacements_size = vocab.precompiled_charsmap.size() - charsmap_offset;
-         }
-
-         for (unsigned int id = 0; id < vocab.id_to_token.size(); ++id) {
-             const auto &token_data = vocab.id_to_token[id];
-
-             if (llama_is_normal_token(vocab, id)) {
-                 min_score = std::min<float>(min_score, token_data.score);
-                 max_score = std::max<float>(max_score, token_data.score);
-             }
-
-             if (llama_is_normal_token(vocab, id) ||
-                 llama_is_user_defined_token(vocab, id) ||
-                 llama_is_unused_token(vocab, id)) {
-                 token_matcher.insert(token_data.text.data(), token_data.text.size(), id);
-             }
-
-             if (llama_is_user_defined_token(vocab, id)) {
-                 user_defined_token_matcher.insert(token_data.text.data(), token_data.text.size());
-             }
-         }
-
-         unknown_token_score = min_score - unknown_token_score_penalty;
-     }
-
-     // escaped space symbol - U+2581 (Lower One Eighth Block)
-     const std::string escaped_space = "\xE2\x96\x81";
-
-     const char * prefix_replacements = NULL;
-     size_t prefix_replacements_size = 0;
-
-     const uint32_t * xcda_array = NULL;
-     size_t xcda_array_size = 0;
-
-     struct naive_trie user_defined_token_matcher;
-
-     float min_score = FLT_MAX;
-     float max_score = -FLT_MAX;
-
-     float unknown_token_score_penalty = 10.0;
-     float unknown_token_score;
-
-     struct naive_trie token_matcher;
- };
-
- struct llm_tokenizer_ugm_session {
-     llm_tokenizer_ugm_session(const llama_vocab & vocab) : vocab(vocab),
-         ugm_tokenizer(static_cast<const llm_tokenizer_ugm *>(vocab.tokenizer)) {}
-
-     /* This implementation is based on SentencePiece optimized Viterbi algorithm for
-      * unigram language models. The general idea is to:
-      * - move along the input sequence in steps of one UTF code point,
-      * - at each step find all possible tokenizations of the prefix by
-      *   traversing the tokens trie,
-      * - for each tokenization store the best one so far (by higher score)
-      * - use the position in sequence after given token as an index to store
-      *   results
-      * - if there was no valid tokenization of the current UTF code point
-      *   then use unknown token with additional score penalty
-      * After processing the whole sequence we backtrack from the end to get
-      * the best tokenization.
-      */
-     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
-         // get current size of output (for reversal later)
-         size_t output_size = output.size();
-
-         // normalize the input first
-         std::string normalized;
-         normalize(text, &normalized);
-         size_t input_len = normalized.size();
-         if (input_len == 0) {
-             return;
-         }
-
-         // initialize score_sum to -FLT_MAX so it will be always lower than sums of token scores
-         std::vector<struct best_tokenization> tokenization_results(input_len + 1, {vocab.special_unk_id, 0, -FLT_MAX});
-         // at the beginning tokenization score is zero
-         tokenization_results[0] = { vocab.special_unk_id, 0, 0 };
-
-         for (size_t input_offset = 0; input_offset < input_len;) {
-             size_t prefix_offset = input_offset;
-             // calculate how many code units are in the currently processed UTF code point
-             size_t n_utf8_code_units = std::min<size_t>(unicode_len_utf8(normalized[input_offset]), input_len - input_offset);
-
-             // traverse the token matcher trie to find a matching token
-             bool single_codepoint_token_found = false;
-             const struct best_tokenization & current_best = tokenization_results[input_offset];
-             const struct naive_trie * node = ugm_tokenizer->token_matcher.traverse(normalized[prefix_offset++]);
-
-             while (prefix_offset <= input_len && node != NULL) {
-                 // check if we found valid token in prefix
-                 if (node->has_value) {
-                     // check if it corresponds to the whole UTF code point
-                     if (prefix_offset - input_offset == n_utf8_code_units) {
-                         single_codepoint_token_found = true;
-                     }
-                     llama_token token_id = node->value;
-                     const auto & token_data = vocab.id_to_token[token_id];
-
-                     // we set the user-defined token scores to 0 to make them more likely to be selected
-                     // (normal token scores are log probabilities, so they are negative)
-                     // score type is double here to make tokenization results exactly
-                     // the same as in the HF tokenizer using SentencePiece
-                     const double token_score = llama_is_user_defined_token(vocab, token_id) ? 0.0 : token_data.score;
-                     const double challenger_score = current_best.score_sum + token_score;
-                     struct best_tokenization & current_champ = tokenization_results[prefix_offset];
-                     if (challenger_score > current_champ.score_sum) {
-                         struct best_tokenization challenger = { token_id, input_offset, (float) challenger_score };
-                         current_champ = challenger;
-                     }
-                 }
-                 node = node->traverse(normalized[prefix_offset++]);
-             }
-
-             // if we didn't find a valid token corresponding to the whole UTF code point
-             // then use unknown token as the tokenization of this UTF code point
-             if (!single_codepoint_token_found) {
-                 const double challenger_score = current_best.score_sum + ugm_tokenizer->unknown_token_score;
-                 prefix_offset = input_offset + n_utf8_code_units;
-                 struct best_tokenization & current_champ = tokenization_results[prefix_offset];
-                 if (challenger_score > current_champ.score_sum) {
-                     struct best_tokenization challenger = { vocab.special_unk_id, input_offset, (float) challenger_score };
-                     current_champ = challenger;
-                 }
-             }
-
-             // move to the next UTF code point
-             input_offset += n_utf8_code_units;
-         }
-
-         // now backtrack from the end to gather token ids of the best tokenization
-         // merge sequences of consecutive unknown tokens into single unknown tokens
-         bool is_prev_unknown = false;
-         for (struct best_tokenization & tokenization = tokenization_results[input_len]; ; tokenization = tokenization_results[tokenization.input_offset]) {
-             bool is_unknown = tokenization.token_id == vocab.special_unk_id;
-             if (!(is_prev_unknown && is_unknown)) {
-                 output.push_back(tokenization.token_id);
-             }
-             if (tokenization.input_offset == 0) {
-                 break;
-             }
-             is_prev_unknown = is_unknown;
-         }
-
-         // reverse the output since we added tokens starting from the end of the input
-         std::reverse(output.begin() + output_size, output.end());
-     }
-
- private:
-
-     // helper structure for returning normalization results
-     struct normalization_result {
-         const char * normalized;
-         size_t normalized_len;
-         size_t consumed_input;
-     };
-
-     void normalize(const std::string& input, std::string * normalized) {
-         normalized->clear();
-         normalized->reserve(input.size() * 3);
-
-         const std::string space = vocab.tokenizer_escape_whitespaces ? ugm_tokenizer->escaped_space : " ";
-
-         bool shall_prepend_space = !vocab.tokenizer_treat_whitespace_as_suffix && vocab.tokenizer_add_space_prefix;
-         bool shall_append_space = vocab.tokenizer_treat_whitespace_as_suffix && vocab.tokenizer_add_space_prefix;
-         bool shall_merge_spaces = vocab.tokenizer_remove_extra_whitespaces;
-
-         bool is_space_prepended = false;
-         bool processing_non_ws = false;
-
-         size_t input_len = input.size();
-
-         for (size_t input_offset = 0; input_offset < input_len; ) {
-             auto norm_res = normalize_prefix(input, input_offset);
-             for (size_t i = 0; i < norm_res.normalized_len; i++) {
-                 char c = norm_res.normalized[i];
-                 if (c != ' ') {
-                     if (!processing_non_ws) {
-                         processing_non_ws = true;
-                         if ((shall_prepend_space && !is_space_prepended) || shall_merge_spaces) {
-                             normalized->append(space);
-                             is_space_prepended = true;
-                         }
-                     }
-                     normalized->push_back(c);
-                 } else {
-                     if (processing_non_ws) {
-                         processing_non_ws = false;
-                     }
-                     if (!shall_merge_spaces) {
-                         normalized->append(space);
-                     }
-                 }
-             }
-
-             input_offset += norm_res.consumed_input;
-         }
-
-         if (shall_append_space) {
-             normalized->append(space);
-         }
-     }
-
-     /*
-      * This structure is a view wrapper for XOR-compressed double array (XCDA)
-      * See Shunsuke Kanda (2018). Space- and Time-Efficient String Dictionaries.
-      * Each bit-packed entry contains:
-      * - BASE array value in bits 10-30
-      * - LCHECK array value in bits 0-7
-      * - LEAF array value in bit 9
-      * Entries containing indexes of replacement sequences have set bit 31
-      */
-     struct xcda_array_view {
-     public:
-         xcda_array_view(const uint32_t * xcda_array, size_t xcda_array_size) : xcda_array(xcda_array), xcda_array_size(xcda_array_size) {
-         }
-         uint32_t get_base(size_t index) {
-             uint32_t packed_node = get_node(index);
-             return (packed_node >> 10) << ((packed_node & (1U << 9)) >> 6);
-         }
-         uint32_t get_lcheck(size_t index) {
-             uint32_t packed_node = get_node(index);
-             return packed_node & ((1U << 31) | 0xff);
-         }
-         bool get_leaf(size_t index) {
-             uint32_t packed_node = get_node(index);
-             return (packed_node >> 8) & 1;
-         }
-         uint32_t get_value(size_t index) {
-             uint32_t packed_node = get_node(index);
-             return packed_node & ((1U << 31) - 1);
-         }
-     private:
-         uint32_t get_node(size_t index) {
-             if (index > xcda_array_size) {
-                 throw std::runtime_error("Index out of array bounds in XCDA array!");
-             }
-             return xcda_array[index];
-         }
-         const uint32_t * xcda_array;
-         size_t xcda_array_size;
-     };
-
-     // this structure stores the best tokenization so far at input_offset
-     struct best_tokenization {
-         llama_token token_id;
-         size_t input_offset;
-         float score_sum;
-     };
-
-     struct normalization_result normalize_prefix(const std::string & input, size_t input_offset) {
-         if (input_offset == input.size()) {
-             return { &input[input_offset], 0, 0 };
-         }
-
-         // if input prefix matches some user-defined token return this token as normalization result
-         auto user_defined_token_match =
-             ugm_tokenizer->user_defined_token_matcher.get_longest_prefix(&input[input_offset], input.size() - input_offset);
-         if (user_defined_token_match.second > 0) {
-             return { &input[input_offset], user_defined_token_match.second, user_defined_token_match.second };
-         }
-
-         size_t longest_prefix_length = 0;
-         size_t longest_prefix_offset = 0;
-
-         if (ugm_tokenizer->xcda_array_size > 0) {
-             struct xcda_array_view xcda_view(ugm_tokenizer->xcda_array, ugm_tokenizer->xcda_array_size);
-
-             // Find the longest normalized sequence matching the input prefix by walking
-             // the XOR-compressed compact double array (XCDA) starting from the root node
-             // We find the index of the next node by calculating BASE[s] ^ c where s is
-             // the index of the previous node and c is a numerical character value
-             uint32_t node_index = 0;
-             // get BASE of the root node
-             node_index = xcda_view.get_base(node_index);
-             for (size_t prefix_offset = input_offset; prefix_offset < input.size(); prefix_offset++) {
-                 unsigned char c = input[prefix_offset];
-                 if (c == 0) {
-                     break;
-                 }
-                 node_index ^= c;
-                 // if value of LCHECK is not c it means that this is not a child of
-                 // the previous node, so we stop matching
-                 if (xcda_view.get_lcheck(node_index) != c) {
-                     break;
-                 }
-                 bool is_leaf = xcda_view.get_leaf(node_index);
-                 // get BASE of the current node
-                 node_index ^= xcda_view.get_base(node_index);
-                 // if LEAF of the current node is true, it means that its BASE points to the node
-                 // containing index of replacement sequence for currently matched input prefix
-                 if (is_leaf) {
-                     longest_prefix_length = prefix_offset - input_offset + 1;
-                     // get index of replacement sequence for currently matched input prefix
-                     longest_prefix_offset = xcda_view.get_value(node_index);
-                 }
-             }
-         }
-
-         if (longest_prefix_length > 0) {
-             // we have a match, so return the replacement sequence
-             if (longest_prefix_offset >= ugm_tokenizer->prefix_replacements_size) {
-                 throw std::runtime_error("Index out of array bounds in precompiled charsmap!");
-             }
-             const char * prefix_replacement = &(ugm_tokenizer->prefix_replacements)[longest_prefix_offset];
-             return { prefix_replacement, strlen(prefix_replacement), longest_prefix_length };
-         }
-
-         // check if the input prefix contains a valid sequence of UTF-8 code units
-         try {
-             // if yes, return this sequence unmodified
-             size_t prefix_offset = input_offset;
-             unicode_cpt_from_utf8(input, prefix_offset);
-             return { &input[input_offset], prefix_offset - input_offset, prefix_offset - input_offset };
-         } catch (std::invalid_argument & /*ex*/) {
-             // if no, consume 1 byte and return U+FFFD - REPLACEMENT CHARACTER
-             return { "\xEF\xBF\xBD", 3, 1 };
-         }
-     }
-
-     const llama_vocab & vocab;
-     const llm_tokenizer_ugm * ugm_tokenizer;
- };
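
The comment at the top of this session describes the Viterbi lattice: `tokenization_results[i]` holds the best-scoring way to reach byte offset `i`. A stripped-down sketch makes that concrete; this toy version replaces the trie walk with substring lookups in a hypothetical score table and skips normalization and the unknown-token penalty:

    #include <cfloat>
    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
        // hypothetical unigram log-probabilities
        std::map<std::string, float> scores = {
            {"a", -1.0f}, {"b", -1.0f}, {"c", -1.0f}, {"ab", -1.5f}, {"abc", -4.0f},
        };
        const std::string text = "abc";
        const size_t n = text.size();

        struct best { size_t tok_start; float score; };
        std::vector<best> dp(n + 1, {0, -FLT_MAX}); // dp[i]: best tokenization ending at offset i
        dp[0] = {0, 0.0f};

        for (size_t i = 0; i < n; ++i) {
            if (dp[i].score == -FLT_MAX) continue;      // offset not reachable
            for (size_t len = 1; i + len <= n; ++len) { // the real code walks the trie here
                auto it = scores.find(text.substr(i, len));
                if (it == scores.end()) continue;
                const float cand = dp[i].score + it->second;
                if (cand > dp[i + len].score) dp[i + len] = {i, cand};
            }
        }

        // backtrack from the end, then print in forward order (the real code reverses `output`)
        std::vector<std::string> pieces;
        for (size_t pos = n; pos > 0; pos = dp[pos].tok_start)
            pieces.push_back(text.substr(dp[pos].tok_start, pos - dp[pos].tok_start));
        for (auto it = pieces.rbegin(); it != pieces.rend(); ++it) printf("'%s' ", it->c_str());
        printf("\n"); // -> 'ab' 'c'  (-2.5 beats 'a','b','c' at -3.0 and 'abc' at -4.0)
    }
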
-
- //
- // RWKV tokenizer
- //
-
- static std::vector<uint8_t> llama_unescape_rwkv_token(const std::string & escaped) {
-     std::vector<uint8_t> output;
-     output.reserve(escaped.size());
-
-     // Parser state
-     bool escaping = false;
-     uint8_t hex_remaining = 0;
-     uint8_t hex_acc = 0;
-
-     // Step through characters, performing parsing
-     for (const char & c : escaped) {
-         // If we're parsing a hex code, interpret the next character
-         if (hex_remaining != 0) {
-             uint8_t value = (c >= 'a') ? (c - 'a' + 10) : (c - '0');
-             hex_acc = (hex_acc << 4) + value;
-
-             hex_remaining -= 1;
-             if (hex_remaining == 0) {
-                 output.push_back(hex_acc);
-                 hex_acc = 0;
-             }
-
-             continue;
-         }
-
-         // If we got an escape character, interpret it
-         if (escaping) {
-             if (c == 't') {
-                 output.push_back('\t');
-             } else if (c == 'n') {
-                 output.push_back('\n');
-             } else if (c == 'r') {
-                 output.push_back('\r');
-             } else if (c == 'x') {
-                 hex_remaining = 2;
-             } else {
-                 output.push_back(c);
-             }
-
-             escaping = false;
-             continue;
-         }
-
-         if (c == '\\') {
-             escaping = true;
-             continue;
-         }
-
-         output.push_back(c);
-     }
-
-     return output;
- }
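
RWKV vocab entries are stored in an escaped printable form, so `\n` and `\xNN` sequences have to be decoded back into raw bytes before they go into the trie. A condensed, runnable restatement of the parser above (like the `(c >= 'a')` branch, it assumes lowercase hex):

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    // condensed restatement of llama_unescape_rwkv_token from the diff above
    static std::vector<uint8_t> unescape(const std::string & s) {
        std::vector<uint8_t> out;
        bool esc = false; int hex_left = 0; uint8_t acc = 0;
        for (char c : s) {
            if (hex_left) {                       // inside a \xNN sequence
                acc = (uint8_t) ((acc << 4) | (c >= 'a' ? c - 'a' + 10 : c - '0'));
                if (--hex_left == 0) { out.push_back(acc); acc = 0; }
            } else if (esc) {                     // character after a backslash
                if      (c == 't') out.push_back('\t');
                else if (c == 'n') out.push_back('\n');
                else if (c == 'r') out.push_back('\r');
                else if (c == 'x') hex_left = 2;
                else               out.push_back((uint8_t) c);
                esc = false;
            } else if (c == '\\') {
                esc = true;
            } else {
                out.push_back((uint8_t) c);
            }
        }
        return out;
    }

    int main() {
        for (uint8_t b : unescape("hi\\x41\\n")) printf("%02x ", b); // -> 68 69 41 0a
        printf("\n");
    }
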
-
- struct llm_tokenizer_rwkv : llm_tokenizer {
-     llm_tokenizer_rwkv(const llama_vocab & vocab) : llm_tokenizer() {
-         // RWKV supports arbitrary byte tokens, but the vocab struct only supports string tokens.
-         // For now, we decode the vocab here into the lookup we'll use for tokenization.
-
-         // build trie
-         for (unsigned int id = 0; id < vocab.id_to_token.size(); ++id) {
-             const auto & token = vocab.id_to_token[id];
-             const auto data = llama_unescape_rwkv_token(token.text);
-             token_matcher.insert((const char *) data.data(), data.size(), id);
-         }
-     }
-
-     struct naive_trie token_matcher;
- };
-
- struct llm_tokenizer_rwkv_session {
-     llm_tokenizer_rwkv_session(const llama_vocab & vocab) : vocab(vocab),
-         rwkv_tokenizer(static_cast<const llm_tokenizer_rwkv &>(*vocab.tokenizer)) {}
-
-     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
-         uint32_t position = 0;
-         while (position < text.size()) {
-             const struct naive_trie * node = rwkv_tokenizer.token_matcher.traverse(text[position]);
-             if (node == NULL) {
-                 // no matching token found, add unknown token
-                 output.push_back(vocab.special_unk_id);
-                 position += 1;
-                 continue;
-             }
-
-             // traverse the trie to find the longest matching token
-             uint32_t token_id = 0;
-             uint32_t token_length = 0;
-             while (node != NULL) {
-                 if (node->has_value) {
-                     token_id = node->value;
-                     token_length = position + 1;
-                 }
-                 node = node->traverse(text[++position]);
-             }
-
-             // add the longest matching token
-             output.push_back(token_id);
-             position = token_length;
-         }
-     }
-
- private:
-     const llama_vocab & vocab;
-     const llm_tokenizer_rwkv & rwkv_tokenizer;
- };
-
- void llama_vocab::init_tokenizer() {
-     switch (type) {
-         case LLAMA_VOCAB_TYPE_SPM:
-             tokenizer = new llm_tokenizer_spm(*this);
-             break;
-         case LLAMA_VOCAB_TYPE_BPE:
-             tokenizer = new llm_tokenizer_bpe(*this);
-             break;
-         case LLAMA_VOCAB_TYPE_WPM:
-             tokenizer = new llm_tokenizer_wpm(*this);
-             break;
-         case LLAMA_VOCAB_TYPE_UGM:
-             tokenizer = new llm_tokenizer_ugm(*this);
-             break;
-         case LLAMA_VOCAB_TYPE_RWKV:
-             tokenizer = new llm_tokenizer_rwkv(*this);
-             break;
-         default:
-             LM_GGML_ABORT("unsupported vocab type");
-     }
- }
-
- //
- // (de-) tokenize
- //
-
- typedef enum FRAGMENT_BUFFER_VARIANT_TYPE {
-     FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN,
-     FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT
- } FRAGMENT_BUFFER_VARIANT_TYPE;
-
- struct fragment_buffer_variant {
-     fragment_buffer_variant(llama_vocab::id _token)
-         :
-         type(FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN),
-         token(_token),
-         raw_text(_dummy),
-         offset(0),
-         length(0) {}
-
-     fragment_buffer_variant(const std::string & _raw_text, int64_t _offset, int64_t _length)
-         :
-         type(FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT),
-         token((llama_vocab::id) - 1),
-         raw_text(_raw_text),
-         offset(_offset),
-         length(_length){
-             LM_GGML_ASSERT(_offset >= 0);
-             LM_GGML_ASSERT(_length >= 1);
-             LM_GGML_ASSERT(offset + length <= raw_text.length());
-         }
-
-     const FRAGMENT_BUFFER_VARIANT_TYPE type;
-     const llama_vocab::id token;
-     const std::string _dummy;
-     const std::string & raw_text;
-     const uint64_t offset;
-     const uint64_t length;
- };
-
- // #define PRETOKENIZERDEBUG
-
- static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer, bool parse_special) {
-     // for each special token
-     for (const llama_vocab::id special_id : vocab.cache_special_tokens) {
-         const auto & data = vocab.id_to_token[special_id];
-         const auto & special_token = data.text;
-
-         if (!parse_special && (data.attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_UNKNOWN))) {
-             // Ignore control and unknown tokens when parse_special == false
-             continue;
-             // User-defined tokens are still pre-tokenized before everything else
-             // ref: https://github.com/huggingface/tokenizers/blob/fdd26ba9a3f0c133427aab0423888cbde91362d7/tokenizers/src/tokenizer/mod.rs#L726
-             // This is mostly relevant for neox-style tokenizers (mpt, olmo, stablelm, etc.)
-         }
-
-         // for each text fragment
-         std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
-         while (it != buffer.end()) {
-             auto & fragment = (*it);
-
-             // if a fragment is text ( not yet processed )
-             if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
-                 const auto & raw_text = fragment.raw_text;
-
-                 auto raw_text_base_offset = fragment.offset;
-                 auto raw_text_base_length = fragment.length;
-
-                 // loop over the text
-                 while (true) {
-                     // find the first occurrence of a given special token in this fragment
-                     // passing offset argument only limit the "search area" but match coordinates
-                     // are still relative to the source full raw_text
-                     auto match = raw_text.find(special_token, raw_text_base_offset);
-
-                     // no occurrences found, stop processing this fragment for a given special token
-                     if (match == std::string::npos) break;
-
-                     // check if match is within bounds of offset <-> length
-                     if (match + special_token.length() > raw_text_base_offset + raw_text_base_length) break;
-
- #ifdef PRETOKENIZERDEBUG
-                     LLAMA_LOG_WARN("FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
- #endif
-                     auto source = std::distance(buffer.begin(), it);
-
-                     // if match is further than base offset
-                     // then we have some text to the left of it
-                     if (match > raw_text_base_offset) {
-                         // left
-                         const int64_t left_reminder_offset = raw_text_base_offset + 0;
-                         int64_t left_reminder_length = match - raw_text_base_offset;
-
-                         if (data.attr & LLAMA_TOKEN_ATTR_LSTRIP) {
-                             while (left_reminder_length > 0 && isspace(raw_text[left_reminder_offset + left_reminder_length - 1])) {
-                                 left_reminder_length--;
-                             }
-                         }
-
-                         if (left_reminder_length > 0) {
-                             buffer.emplace_after(it, raw_text, left_reminder_offset, left_reminder_length);
-                             it++;
-                         }
-
- #ifdef PRETOKENIZERDEBUG
-                         LLAMA_LOG_WARN("FL: (%ld %ld) '%s'\n", left_reminder_offset, left_reminder_length, raw_text->substr(left_reminder_offset, left_reminder_length).c_str());
- #endif
-                     }
-
-                     // special token
-                     buffer.emplace_after(it, special_id);
-                     it++;
-
-                     // right
-                     if (match + special_token.length() < raw_text_base_offset + raw_text_base_length) {
-                         int64_t right_reminder_offset = match + special_token.length();
-                         int64_t right_reminder_length = raw_text_base_length - ((match - raw_text_base_offset) + special_token.length());
-
-                         if (data.attr & LLAMA_TOKEN_ATTR_RSTRIP) {
-                             while (right_reminder_length > 0 && isspace(raw_text[right_reminder_offset])) {
-                                 right_reminder_offset++;
-                                 right_reminder_length--;
-                             }
-                         }
-
-                         if (right_reminder_length > 0) {
-                             buffer.emplace_after(it, raw_text, right_reminder_offset, right_reminder_length);
-                             it++;
-                         }
-
- #ifdef PRETOKENIZERDEBUG
-                         LLAMA_LOG_WARN("FR: (%ld %ld) '%s'\n", right_reminder_offset, right_reminder_length, raw_text->substr(right_reminder_offset, right_reminder_length).c_str());
- #endif
-
-                         if (source == 0) {
-                             buffer.erase_after(buffer.before_begin());
-                         } else {
-                             buffer.erase_after(std::next(buffer.begin(), (source-1)));
-                         }
-
-                         // repeat for the right side
-                         raw_text_base_offset = right_reminder_offset;
-                         raw_text_base_length = right_reminder_length;
-
- #ifdef PRETOKENIZERDEBUG
-                         LLAMA_LOG_WARN("RR: (%ld %ld) '%s'\n", raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
- #endif
-                     } else {
-                         if (source == 0) {
-                             buffer.erase_after(buffer.before_begin());
-                         } else {
-                             buffer.erase_after(std::next(buffer.begin(), (source-1)));
-                         }
-                         break;
-                     }
-                 }
-             }
-             it++;
-         }
-     }
- }
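
In effect, `tokenizer_st_partition` rewrites each raw-text fragment into an alternating sequence of raw-text and special-token fragments before any per-vocab tokenizer runs, so `"hi<eos>there"` becomes RAW("hi"), TOKEN(eos), RAW("there"). A simplified sketch of that partitioning for a single special token (a plain vector instead of `std::forward_list`, hypothetical token text and id, no LSTRIP/RSTRIP handling):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct frag { bool is_token; int token; std::string text; };

    int main() {
        const std::string special = "<eos>"; // hypothetical special-token text
        const int special_id = 2;            // hypothetical id

        const std::string raw = "hi<eos>there<eos>";
        std::vector<frag> out;
        size_t base = 0;
        while (true) {
            const size_t match = raw.find(special, base);
            if (match == std::string::npos) break;
            if (match > base) out.push_back({false, -1, raw.substr(base, match - base)}); // left remainder
            out.push_back({true, special_id, ""});                                        // the token itself
            base = match + special.size();                                                // continue on the right
        }
        if (base < raw.size()) out.push_back({false, -1, raw.substr(base)});

        for (const frag & f : out) {
            if (f.is_token) printf("TOKEN(%d) ", f.token);
            else            printf("RAW('%s') ", f.text.c_str());
        }
        printf("\n"); // -> RAW('hi') TOKEN(2) RAW('there') TOKEN(2)
    }
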
1436
-
1437
- std::vector<llama_vocab::id> llama_tokenize_internal(
1438
- const llama_vocab & vocab,
1439
- std::string raw_text,
1440
- bool add_special,
1441
- bool parse_special) {
1442
- LM_GGML_ASSERT(vocab.tokenizer && "Tokenizer not initialized. Call llama_vocab::init_tokenizer() first.");
1443
-
1444
- std::vector<llama_vocab::id> output;
1445
- std::forward_list<fragment_buffer_variant> fragment_buffer;
1446
-
1447
- if (!raw_text.empty()) {
1448
- fragment_buffer.emplace_front(raw_text, 0, raw_text.length());
1449
- tokenizer_st_partition(vocab, fragment_buffer, parse_special);
1450
- }
1451
-
1452
- switch (vocab.type) {
1453
- case LLAMA_VOCAB_TYPE_SPM:
1454
- {
1455
- // OG tokenizer behavior:
1456
- //
1457
- // tokenizer.encode('', add_special_tokens=True) returns [1]
1458
- // tokenizer.encode('', add_special_tokens=False) returns []
1459
-
1460
- bool is_prev_special = true; // prefix with space if first token
1461
-
1462
- if (add_special && vocab.tokenizer_add_bos) {
1463
- LM_GGML_ASSERT(vocab.special_bos_id != -1);
1464
- output.push_back(vocab.special_bos_id);
1465
- is_prev_special = true;
1466
- }
1467
-
1468
- for (const auto & fragment : fragment_buffer) {
1469
- if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
1470
- auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
1471
-
1472
- // prefix with space if previous is special
1473
- if (vocab.tokenizer_add_space_prefix && is_prev_special) {
1474
- raw_text = " " + raw_text;
1475
- }
1476
-
1477
- #ifdef PRETOKENIZERDEBUG
1478
- LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
1479
- #endif
1480
- llama_escape_whitespace(raw_text);
1481
- llm_tokenizer_spm_session session(vocab);
1482
- session.tokenize(raw_text, output);
1483
- is_prev_special = false;
1484
- } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
1485
- output.push_back(fragment.token);
1486
- is_prev_special = true;
1487
- }
1488
- }
1489
-
1490
- if (add_special && vocab.tokenizer_add_bos && output.size() >= 2 && output[1] == vocab.special_bos_id) {
1491
- LLAMA_LOG_WARN(
1492
- "%s: Added a BOS token to the prompt as specified by the model but the prompt "
1493
- "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
1494
- "Are you sure this is what you want?\n", __FUNCTION__);
1495
- }
1496
-
1497
- if (add_special && vocab.tokenizer_add_eos) {
1498
- LM_GGML_ASSERT(vocab.special_eos_id != -1);
1499
- output.push_back(vocab.special_eos_id);
1500
- }
1501
- } break;
1502
- case LLAMA_VOCAB_TYPE_BPE:
1503
- {
1504
- llm_tokenizer_bpe_session session(vocab);
1505
- // the session calls methods that do not exist in the base llm_tokenizer,
1506
- // so it is cast to the BPE tokenizer object here
1507
- if (add_special) {
1508
- session.append_bos(output);
1509
- }
1510
- for (const auto & fragment : fragment_buffer) {
1511
- if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
1512
- auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
1513
-
1514
- #ifdef PRETOKENIZERDEBUG
1515
- LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
1516
- #endif
1517
- session.tokenize(raw_text, output);
1518
- } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
1519
- session.append(fragment.token, output);
1520
- }
1521
- }
1522
-
1523
- if (add_special) {
1524
- session.append_eos(output);
1525
- session.check_double_bos_eos(output);
1526
- }
1527
- } break;
1528
- case LLAMA_VOCAB_TYPE_WPM:
1529
- {
1530
- if (add_special) {
1531
- LM_GGML_ASSERT(vocab.special_cls_id != -1);
1532
- output.push_back(vocab.special_cls_id);
1533
- }
1534
-
1535
- llm_tokenizer_wpm_session session(vocab);
1536
-
1537
- for (const auto & fragment : fragment_buffer) {
1538
- if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
1539
- auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
1540
-
1541
- #ifdef PRETOKENIZERDEBUG
1542
- LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
1543
- #endif
1544
- session.tokenize(raw_text, output);
1545
- } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
1546
- output.push_back(fragment.token);
1547
- }
1548
- }
1549
-
1550
- if (add_special) {
1551
- LM_GGML_ASSERT(vocab.special_sep_id != -1);
1552
- output.push_back(vocab.special_sep_id);
1553
- }
1554
- } break;
1555
- case LLAMA_VOCAB_TYPE_UGM:
1556
- {
1557
- if (add_special && vocab.tokenizer_add_bos) {
1558
- LM_GGML_ASSERT(vocab.special_bos_id != -1);
1559
- output.push_back(vocab.special_bos_id);
1560
- }
1561
- llm_tokenizer_ugm_session session(vocab);
1562
-
1563
- for (const auto & fragment : fragment_buffer) {
1564
- if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
1565
- auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
1566
- #ifdef PRETOKENIZERDEBUG
1567
- LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
1568
- #endif
1569
- session.tokenize(raw_text, output);
1570
- } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
1571
- output.push_back(fragment.token);
1572
- }
1573
- }
1574
-
1575
- if (add_special && vocab.tokenizer_add_bos && output.size() >= 2 && output[1] == vocab.special_bos_id) {
1576
- LLAMA_LOG_WARN(
1577
- "%s: Added a BOS token to the prompt as specified by the model but the prompt "
1578
- "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
1579
- "Are you sure this is what you want?\n", __FUNCTION__);
1580
- }
1581
-
1582
- if (add_special && vocab.tokenizer_add_eos) {
1583
- LM_GGML_ASSERT(vocab.special_eos_id != -1);
1584
- output.push_back(vocab.special_eos_id);
1585
- }
1586
- } break;
1587
- case LLAMA_VOCAB_TYPE_RWKV:
1588
- {
1589
- llm_tokenizer_rwkv_session session(vocab);
1590
- for (const auto & fragment : fragment_buffer) {
1591
- if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
1592
- auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
1593
-
1594
- #ifdef PRETOKENIZERDEBUG
1595
- LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
1596
- #endif
1597
-
1598
- session.tokenize(raw_text, output);
1599
- } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
1600
- output.push_back(fragment.token);
1601
- }
1602
- }
1603
- } break;
1604
- case LLAMA_VOCAB_TYPE_NONE:
1605
- LM_GGML_ABORT("fatal error");
1606
- }
1607
-
1608
- return output;
1609
- }
1610
-
1611
- llama_token llama_byte_to_token_impl(const llama_vocab & vocab, uint8_t ch) {
1612
- LM_GGML_ASSERT(llama_vocab_get_type(vocab) != LLAMA_VOCAB_TYPE_NONE);
1613
- static const char * hex = "0123456789ABCDEF";
1614
- switch (llama_vocab_get_type(vocab)) {
1615
- case LLAMA_VOCAB_TYPE_SPM:
1616
- case LLAMA_VOCAB_TYPE_UGM: {
1617
- const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
1618
- auto token = vocab.token_to_id.find(buf);
1619
- if (token != vocab.token_to_id.end()) {
1620
- return (*token).second;
1621
- }
1622
- // Try to fall back to just the byte as a string
1623
- const char buf2[2] = { (char)ch, 0 };
1624
- return vocab.token_to_id.at(buf2);
1625
- }
1626
- case LLAMA_VOCAB_TYPE_WPM:
1627
- case LLAMA_VOCAB_TYPE_BPE: {
1628
- return vocab.token_to_id.at(unicode_byte_to_utf8(ch));
1629
- }
1630
- default:
1631
- LM_GGML_ABORT("fatal error");
1632
- }
1633
- }
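SPM and UGM vocabularies represent raw bytes as "<0xXX>" tokens; the lookup above builds that six-character key before falling back to the single-byte string. A small sketch of the key construction:

    #include <cstdint>
    #include <cstdio>

    int main() {
        static const char * hex = "0123456789ABCDEF";
        const uint8_t ch = 0xE2;              // an arbitrary byte
        const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
        std::printf("%s\n", buf);             // prints <0xE2>
    }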
1634
-
1635
- const char * llama_token_get_text_impl(const struct llama_vocab & vocab, llama_token token) {
1636
- LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
1637
- return vocab.id_to_token[token].text.c_str();
1638
- }
1639
-
1640
- float llama_token_get_score_impl(const struct llama_vocab & vocab, llama_token token) {
1641
- LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
1642
- return vocab.id_to_token[token].score;
1643
- }
1644
-
1645
- llama_token_attr llama_token_get_attr_impl(const struct llama_vocab & vocab, llama_token token) {
1646
- LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
1647
- return vocab.id_to_token[token].attr;
1648
- }
1649
-
1650
- bool llama_token_is_eog_impl(const struct llama_vocab & vocab, llama_token token) {
1651
- return token != -1 && vocab.special_eog_ids.count(token) > 0;
1652
- }
1653
-
1654
- bool llama_token_is_control_impl(const struct llama_vocab & vocab, llama_token token) {
1655
- return llama_is_control_token(vocab, token);
1656
- }
1657
-
1658
- llama_token llama_token_bos_impl(const struct llama_vocab & vocab) {
1659
- return vocab.special_bos_id;
1660
- }
1661
-
1662
- llama_token llama_token_eos_impl(const struct llama_vocab & vocab) {
1663
- return vocab.special_eos_id;
1664
- }
1665
-
1666
- llama_token llama_token_eot_impl(const struct llama_vocab & vocab) {
1667
- return vocab.special_eot_id;
1668
- }
1669
-
1670
- llama_token llama_token_eom_impl(const struct llama_vocab & vocab) {
1671
- return vocab.special_eom_id;
1672
- }
1673
-
1674
- llama_token llama_token_cls_impl(const struct llama_vocab & vocab) {
1675
- return vocab.special_cls_id;
1676
- }
1677
-
1678
- llama_token llama_token_sep_impl(const struct llama_vocab & vocab) {
1679
- return vocab.special_sep_id;
1680
- }
1681
-
1682
- llama_token llama_token_nl_impl(const struct llama_vocab & vocab) {
1683
- return vocab.linefeed_id;
1684
- }
1685
-
1686
- llama_token llama_token_pad_impl(const struct llama_vocab & vocab) {
1687
- return vocab.special_pad_id;
1688
- }
1689
-
1690
- bool llama_add_bos_token_impl(const struct llama_vocab & vocab) {
1691
- return vocab.tokenizer_add_bos;
1692
- }
1693
-
1694
- bool llama_add_eos_token_impl(const struct llama_vocab & vocab) {
1695
- return vocab.tokenizer_add_eos;
1696
- }
1697
-
1698
- llama_token llama_token_prefix_impl(const struct llama_vocab & vocab) {
1699
- return vocab.special_fim_pre_id;
1700
- }
1701
-
1702
- llama_token llama_token_middle_impl(const struct llama_vocab & vocab) {
1703
- return vocab.special_fim_mid_id;
1704
- }
1705
-
1706
- llama_token llama_token_suffix_impl(const struct llama_vocab & vocab) {
1707
- return vocab.special_fim_suf_id;
1708
- }
1709
-
1710
- llama_token llama_token_fim_pre_impl(const struct llama_vocab & vocab) {
1711
- return vocab.special_fim_pre_id;
1712
- }
1713
-
1714
- llama_token llama_token_fim_suf_impl(const struct llama_vocab & vocab) {
1715
- return vocab.special_fim_suf_id;
1716
- }
1717
-
1718
- llama_token llama_token_fim_mid_impl(const struct llama_vocab & vocab) {
1719
- return vocab.special_fim_mid_id;
1720
- }
1721
-
1722
- llama_token llama_token_fim_pad_impl(const struct llama_vocab & vocab) {
1723
- return vocab.special_fim_pad_id;
1724
- }
1725
-
1726
- llama_token llama_token_fim_rep_impl(const struct llama_vocab & vocab) {
1727
- return vocab.special_fim_rep_id;
1728
- }
1729
-
1730
- llama_token llama_token_fim_sep_impl(const struct llama_vocab & vocab) {
1731
- return vocab.special_fim_sep_id;
1732
- }
1733
-
1734
- int32_t llama_tokenize_impl(
1735
- const struct llama_vocab & vocab,
1736
- const char * text,
1737
- int32_t text_len,
1738
- llama_token * tokens,
1739
- int32_t n_tokens_max,
1740
- bool add_special,
1741
- bool parse_special) {
1742
- auto res = llama_tokenize_internal(vocab, std::string(text, text_len), add_special, parse_special);
1743
- if (n_tokens_max < (int) res.size()) {
1744
- // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
1745
- return -((int) res.size());
1746
- }
1747
-
1748
- for (size_t i = 0; i < res.size(); i++) {
1749
- tokens[i] = res[i];
1750
- }
1751
-
1752
- return res.size();
1753
- }
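Note the return convention: when the destination buffer holds fewer than the required n tokens, llama_tokenize_impl returns -n so callers can size and retry. A sketch of a hypothetical caller (vocab, text and text_len assumed in scope):

    std::vector<llama_token> toks(8);
    int32_t n = llama_tokenize_impl(vocab, text, text_len,
                                    toks.data(), (int32_t) toks.size(),
                                    /*add_special=*/true, /*parse_special=*/false);
    if (n < 0) {
        toks.resize(-n);                      // grow to the required size
        n = llama_tokenize_impl(vocab, text, text_len,
                                toks.data(), (int32_t) toks.size(), true, false);
    }
    toks.resize(n);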
1754
-
1755
- static std::string llama_decode_text(const std::string & text) {
1756
- std::string decoded_text;
1757
-
1758
- const auto cpts = unicode_cpts_from_utf8(text);
1759
- for (const auto cpt : cpts) {
1760
- const auto utf8 = unicode_cpt_to_utf8(cpt);
1761
- try {
1762
- decoded_text += unicode_utf8_to_byte(utf8);
1763
- } catch (const std::out_of_range & /*e*/) {
1764
- decoded_text += "[UNK_BYTE_0x";
1765
- for (const auto c : utf8) {
1766
- decoded_text += format("%02x", (uint8_t) c);
1767
- }
1768
- decoded_text += text + "]";
1769
- }
1770
- }
1771
-
1772
- return decoded_text;
1773
- }
1774
-
1775
- // does not write null-terminator to buf
1776
- int32_t llama_token_to_piece_impl(const struct llama_vocab & vocab, llama_token token, char * buf, int32_t length, int32_t lstrip, bool special) {
1777
- // ref: https://github.com/ggerganov/llama.cpp/pull/7587#discussion_r1620983843
1778
- static const int attr_special = LLAMA_TOKEN_ATTR_UNKNOWN | LLAMA_TOKEN_ATTR_CONTROL;
1779
- const llama_token_attr attr = llama_token_get_attr_impl(vocab, token);
1780
- if (!special && (attr & attr_special)) {
1781
- return 0;
1782
- }
1783
-
1784
- // copy piece chars to output text buffer
1785
- // skip up to 'lstrip' leading spaces before copying
1786
- auto _try_copy = [=] (const char * token, size_t size) -> int32_t {
1787
- for (int32_t i = 0; i < lstrip && size && *token == ' '; ++i) {
1788
- token++;
1789
- size--;
1790
- }
1791
- if (length < (int32_t)size) {
1792
- return -(int32_t) size;
1793
- }
1794
- memcpy(buf, token, size);
1795
- return (int32_t) size;
1796
- };
1797
-
1798
- // if we have a cache - use it
1799
- {
1800
- const auto & cache = vocab.cache_token_to_piece;
1801
-
1802
- if (!cache.empty()) {
1803
- const auto & result = cache.at(token);
1804
- return _try_copy(result.data(), result.size());
1805
- }
1806
- }
1807
-
1808
- if (0 <= token && token < (int32_t) vocab.id_to_token.size()) {
1809
- const std::string & token_text = vocab.id_to_token[token].text;
1810
- switch (llama_vocab_get_type(vocab)) {
1811
- case LLAMA_VOCAB_TYPE_WPM:
1812
- case LLAMA_VOCAB_TYPE_SPM:
1813
- case LLAMA_VOCAB_TYPE_UGM: {
1814
- // NOTE: we accept all unsupported token types,
1815
- // suppressing them like CONTROL tokens.
1816
- if (attr & (attr_special | LLAMA_TOKEN_ATTR_USER_DEFINED)) {
1817
- return _try_copy(token_text.data(), token_text.size());
1818
- }
1819
- if (attr & LLAMA_TOKEN_ATTR_NORMAL) {
1820
- std::string result = token_text;
1821
- llama_unescape_whitespace(result);
1822
- return _try_copy(result.data(), result.size());
1823
- }
1824
- if (attr & LLAMA_TOKEN_ATTR_BYTE) {
1825
- char byte = (char) llama_token_to_byte(vocab, token);
1826
- return _try_copy((char*) &byte, 1);
1827
- }
1828
- break;
1829
- }
1830
- case LLAMA_VOCAB_TYPE_BPE: {
1831
- // NOTE: we accept all unsupported token types,
1832
- // suppressing them like CONTROL tokens.
1833
- if (attr & (attr_special | LLAMA_TOKEN_ATTR_USER_DEFINED)) {
1834
- return _try_copy(token_text.data(), token_text.size());
1835
- }
1836
- if (attr & LLAMA_TOKEN_ATTR_NORMAL) {
1837
- std::string result = llama_decode_text(token_text);
1838
- return _try_copy(result.data(), result.size());
1839
- }
1840
- break;
1841
- }
1842
- case LLAMA_VOCAB_TYPE_RWKV: {
1843
- std::vector<uint8_t> result = llama_unescape_rwkv_token(token_text);
1844
-
1845
- // If we don't have enough space, return an error
1846
- if (result.size() > (size_t)length) {
1847
- return -(int)result.size();
1848
- }
1849
-
1850
- memcpy(buf, result.data(), result.size());
1851
- return (int)result.size();
1852
- }
1853
- default:
1854
- LM_GGML_ABORT("fatal error");
1855
- }
1856
- }
1857
-
1858
- return 0;
1859
- }
1860
-
1861
- int32_t llama_detokenize_impl(
1862
- const struct llama_vocab & vocab,
1863
- const llama_token * tokens,
1864
- int32_t n_tokens,
1865
- char * text,
1866
- int32_t text_len_max,
1867
- bool remove_special,
1868
- bool unparse_special) {
1869
- LM_GGML_ASSERT(vocab.tokenizer && "Tokenizer not initialized. Call llama_vocab::init_tokenizer() first.");
1870
-
1871
- int32_t avail = text_len_max;
1872
- int32_t total = 0;
1873
-
1874
- // remove the leading space
1875
- bool remove_space = vocab.tokenizer_add_space_prefix;
1876
-
1877
- if (remove_special && vocab.tokenizer_add_bos) {
1878
- if (n_tokens > 0 && tokens[0] == vocab.special_bos_id) {
1879
- remove_space = false;
1880
- n_tokens--;
1881
- tokens++;
1882
- }
1883
- }
1884
-
1885
- if (remove_special && vocab.tokenizer_add_eos) {
1886
- if (n_tokens > 0 && tokens[n_tokens-1] == vocab.special_eos_id) {
1887
- n_tokens--;
1888
- }
1889
- }
1890
-
1891
- for (int32_t i = 0; i < n_tokens; ++i) {
1892
- LM_GGML_ASSERT(avail >= 0);
1893
- int32_t n_chars = llama_token_to_piece_impl(vocab, tokens[i], text, avail, remove_space, unparse_special);
1894
- remove_space = false;
1895
- if (n_chars < 0) {
1896
- avail = 0;
1897
- total -= n_chars;
1898
- } else if (n_chars > 0) {
1899
- avail -= n_chars;
1900
- text += n_chars;
1901
- total += n_chars;
1902
- }
1903
- }
1904
-
1905
- if (total > text_len_max) {
1906
- return -total;
1907
- }
1908
-
1909
- if (vocab.tokenizer_clean_spaces) {
1910
- text -= total; // restart text
1911
-
1912
- // first pass: characters ?!., //TODO: where do these characters come from?
1913
- const int32_t total1 = total;
1914
- total = total ? 1 : 0;
1915
- for (int32_t i = 1; i < total1; ++i) {
1916
- const char x = text[i];
1917
- if (text[i - 1] == ' ') {
1918
- if (x == '?' || x == '!' || x == '.' || x == ',') { // " ?", " !", " .", " ,"
1919
- total--; // remove space
1920
- }
1921
- }
1922
- text[total++] = x;
1923
- }
1924
-
1925
- // second pass: strip single apostrophe between spaces
1926
- const int32_t total2 = total;
1927
- total = total ? 1 : 0;
1928
- for (int32_t i = 1; i < total2; ++i) {
1929
- const char x = text[i];
1930
- if (x == '\'' && i + 1 < total2 && text[i - 1] == ' ' && text[i + 1] == ' ') { // " ' "
1931
- total--; // remove prev space
1932
- text[++i] = '\0'; // remove next space
1933
- }
1934
- text[total++] = x;
1935
- }
1936
-
1937
- // third pass: apostrophe contractions //NOTE: does this make sense?
1938
- const int32_t total3 = total;
1939
- total = total ? 1 : 0;
1940
- for (int32_t i = 1; i < total3; ++i) {
1941
- const char x = text[i];
1942
- if (text[i - 1] == ' ') {
1943
- if (x == '\'' && i + 1 < total3) {
1944
- const char x1 = text[i + 1];
1945
- if (x1 == 't' || x1 == 'd') { // " 't", " 'd"
1946
- //total--; // remove space
1947
- } else if (x1 == 's' || x1 == 'm') { // " 's", " 'm"
1948
- total--; // remove space
1949
- } else if (i + 2 < total3) {
1950
- const char x2 = text[i + 2];
1951
- if ((x1 == 'l' && x2 == 'l')) { // " 'll"
1952
- //total--; // remove space
1953
- } else if ((x1 == 'r' && x2 == 'e') || (x1 == 'v' && x2 == 'e')) { // " 're", " 've"
1954
- total--; // remove space
1955
- } else {
1956
- //total--; // remove space
1957
- }
1958
- } else {
1959
- //total--; // remove space
1960
- }
1961
- }
1962
- }
1963
- text[total++] = x;
1964
- }
1965
- }
1966
-
1967
- return total <= text_len_max ? total : -total;
1968
- }
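The first cleanup pass above compacts the buffer in place, dropping the space in " ?", " !", " ." and " ,". An equivalent standalone version over a std::string (illustrative only; the in-buffer variant avoids an extra allocation):

    #include <cassert>
    #include <string>

    static void clean_punct_spaces(std::string & s) {
        size_t out = s.empty() ? 0 : 1;       // the first character always survives
        for (size_t i = 1; i < s.size(); ++i) {
            const char x = s[i];
            if (s[i - 1] == ' ' && (x == '?' || x == '!' || x == '.' || x == ',')) {
                out--;                        // overwrite the preceding space
            }
            s[out++] = x;
        }
        s.resize(out);
    }

    int main() {
        std::string s = "Hello , world !";
        clean_punct_spaces(s);
        assert(s == "Hello, world!");
    }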
1969
-
1970
- std::string llama_detokenize(const struct llama_vocab & vocab, const std::vector<llama_token> & tokens, bool special) {
1971
- std::string text;
1972
- text.resize(std::max(text.capacity(), tokens.size()));
1973
- int32_t n_chars = llama_detokenize_impl(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
1974
- if (n_chars < 0) {
1975
- text.resize(-n_chars);
1976
- n_chars = llama_detokenize_impl(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
1977
- LM_GGML_ASSERT(n_chars <= (int32_t)text.size()); // whitespace trimming is performed after per-token detokenization
1978
- }
1979
-
1980
- text.resize(n_chars);
1981
-
1982
- // NOTE: the original tokenizer decodes bytes after collecting the pieces.
1983
- return text;
1984
- }
1
+ #include "llama-vocab.h"
2
+
3
+ #include "unicode.h"
4
+
5
+ #include <algorithm>
6
+ #include <cassert>
7
+ #include <cfloat>
8
+ #include <climits>
9
+ #include <cstdarg>
10
+ #include <cstring>
11
+ #include <forward_list>
12
+ #include <queue>
13
+ #include <sstream>
14
+
15
+ //
16
+ // helpers
17
+ //
18
+
19
+ LLAMA_ATTRIBUTE_FORMAT(1, 2)
20
+ static std::string format(const char * fmt, ...) {
21
+ va_list ap;
22
+ va_list ap2;
23
+ va_start(ap, fmt);
24
+ va_copy(ap2, ap);
25
+ int size = vsnprintf(NULL, 0, fmt, ap);
26
+ LM_GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
27
+ std::vector<char> buf(size + 1);
28
+ int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
29
+ LM_GGML_ASSERT(size2 == size);
30
+ va_end(ap2);
31
+ va_end(ap);
32
+ return std::string(buf.data(), size);
33
+ }
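The helper sizes the result with a first vsnprintf(NULL, 0, ...) call and then formats into an exactly-sized buffer, so no output length has to be guessed. Hypothetical usage:

    std::string msg = format("token %d has score %.3f", 42, -1.5f);
    // msg == "token 42 has score -1.500"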
34
+
35
+ struct naive_trie {
36
+ naive_trie() : has_value(false), value(0) {
37
+ }
38
+ void insert(const char * key, size_t len, int32_t value = 0) {
39
+ if (len == 0) {
40
+ this->has_value = true;
41
+ this->value = value;
42
+ return;
43
+ }
44
+ char c = key[0];
45
+ auto res = children.find(c);
46
+ if (res != children.end()) {
47
+ res->second.insert(key + 1, len - 1, value);
48
+ } else {
49
+ auto res = children.insert(std::make_pair(c, naive_trie()));
50
+ res.first->second.insert(key + 1, len - 1, value);
51
+ }
52
+ }
53
+ std::pair<const char *, size_t> get_longest_prefix(const char * key, size_t len, size_t offset = 0) const {
54
+ if (len == 0 || offset == len) {
55
+ return std::make_pair(key, offset);
56
+ }
57
+ char c = key[offset];
58
+ auto res = children.find(c);
59
+ if (res != children.end()) {
60
+ return res->second.get_longest_prefix(key, len, offset + 1);
61
+ }
62
+
63
+ return std::make_pair(key, offset);
64
+ }
65
+ const struct naive_trie * traverse(const char c) const {
66
+ auto res = children.find(c);
67
+ if (res != children.end()) {
68
+ return &res->second;
69
+ }
70
+
71
+ return NULL;
72
+ }
73
+ std::map<char, struct naive_trie> children;
74
+ bool has_value;
75
+ llama_token value;
76
+ };
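A quick sketch of how the trie behaves: get_longest_prefix() returns the key pointer together with the number of leading characters that matched a stored path, and traverse() descends one character at a time:

    naive_trie trie;
    trie.insert("ab",  2, /*value=*/10);
    trie.insert("abc", 3, /*value=*/11);

    auto m = trie.get_longest_prefix("abcd", 4);
    // m.second == 3: "abc" is the longest stored path prefixing "abcd"

    const naive_trie * node = trie.traverse('a');
    // node != NULL; node->traverse('b')->has_value == true, with value 10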
77
+
78
+ //
79
+ // impl
80
+ //
81
+
82
+ struct llm_tokenizer {
83
+ llm_tokenizer() {}
84
+ virtual ~llm_tokenizer() = default;
85
+ };
86
+
87
+ llama_vocab::~llama_vocab() {
88
+ delete tokenizer;
89
+ }
90
+
91
+ int llama_vocab::find_bpe_rank(const std::string & token_left, const std::string & token_right) const {
92
+ LM_GGML_ASSERT(token_left.find(' ') == std::string::npos);
93
+ LM_GGML_ASSERT(token_left.find('\n') == std::string::npos);
94
+ LM_GGML_ASSERT(token_right.find(' ') == std::string::npos);
95
+ LM_GGML_ASSERT(token_right.find('\n') == std::string::npos);
96
+
97
+ auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
98
+ if (it == bpe_ranks.end()) {
99
+ return -1;
100
+ }
101
+
102
+ return it->second;
103
+ }
104
+
105
+ static enum llama_vocab_type llama_vocab_get_type(const llama_vocab & vocab) {
106
+ return vocab.type;
107
+ }
108
+
109
+ static bool llama_is_normal_token(const llama_vocab & vocab, llama_token id) {
110
+ LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
111
+ return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_NORMAL;
112
+ }
113
+
114
+ static bool llama_is_unknown_token(const llama_vocab & vocab, llama_token id) {
115
+ LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
116
+ return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_UNKNOWN;
117
+ }
118
+
119
+ static bool llama_is_control_token(const llama_vocab & vocab, llama_token id) {
120
+ LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
121
+ return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_CONTROL;
122
+ }
123
+
124
+ static bool llama_is_byte_token(const llama_vocab & vocab, llama_token id) {
125
+ LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
126
+ return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_BYTE;
127
+ }
128
+
129
+ static bool llama_is_user_defined_token(const llama_vocab & vocab, llama_token id) {
130
+ LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
131
+ return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_USER_DEFINED;
132
+ }
133
+
134
+ static bool llama_is_unused_token(const llama_vocab & vocab, llama_token id) {
135
+ LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
136
+ return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_UNUSED;
137
+ }
138
+
139
+ static uint8_t llama_token_to_byte(const llama_vocab & vocab, llama_token id) {
140
+ LM_GGML_ASSERT(llama_vocab_get_type(vocab) != LLAMA_VOCAB_TYPE_NONE);
141
+ LM_GGML_ASSERT(llama_is_byte_token(vocab, id));
142
+ const auto & token_data = vocab.id_to_token.at(id);
143
+ switch (llama_vocab_get_type(vocab)) {
144
+ case LLAMA_VOCAB_TYPE_SPM:
145
+ case LLAMA_VOCAB_TYPE_UGM: {
146
+ auto buf = token_data.text.substr(3, 2);
147
+ return strtol(buf.c_str(), NULL, 16);
148
+ }
149
+ case LLAMA_VOCAB_TYPE_BPE: {
150
+ LM_GGML_ABORT("fatal error");
151
+ //return unicode_utf8_to_byte(token_data.text); // TODO: why is this here after LM_GGML_ASSERT?
152
+ }
153
+ case LLAMA_VOCAB_TYPE_WPM: {
154
+ LM_GGML_ABORT("fatal error");
155
+ }
156
+ default:
157
+ LM_GGML_ABORT("fatal error");
158
+ }
159
+ }
160
+
161
+ static void llama_escape_whitespace(std::string & text) {
162
+ replace_all(text, " ", "\xe2\x96\x81");
163
+ }
164
+
165
+ static void llama_unescape_whitespace(std::string & word) {
166
+ replace_all(word, "\xe2\x96\x81", " ");
167
+ }
168
+
169
+ struct llm_symbol {
170
+ using index = int;
171
+ index prev;
172
+ index next;
173
+ const char * text;
174
+ size_t n;
175
+ };
176
+
177
+ static_assert(std::is_trivially_copyable<llm_symbol>::value, "llm_symbol is not trivially copyable");
178
+
179
+ //
180
+ // SPM tokenizer
181
+ // original implementation:
182
+ // https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
183
+ //
184
+
185
+ struct llm_bigram_spm {
186
+ struct comparator {
187
+ bool operator()(llm_bigram_spm & l, llm_bigram_spm & r) {
188
+ return (l.score < r.score) || (l.score == r.score && l.left > r.left);
189
+ }
190
+ };
191
+ using queue_storage = std::vector<llm_bigram_spm>;
192
+ using queue = std::priority_queue<llm_bigram_spm, queue_storage, comparator>;
193
+ llm_symbol::index left;
194
+ llm_symbol::index right;
195
+ float score;
196
+ size_t size;
197
+ };
198
+
199
+ struct llm_tokenizer_spm : llm_tokenizer {
200
+ llm_tokenizer_spm(const llama_vocab & /*vocab*/) : llm_tokenizer() {}
201
+ };
202
+
203
+ struct llm_tokenizer_spm_session {
204
+ llm_tokenizer_spm_session(const llama_vocab & vocab) : vocab(vocab) {}
205
+
206
+ void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
207
+
208
+ // split string into utf8 chars
209
+ int index = 0;
210
+ size_t offs = 0;
211
+ while (offs < text.size()) {
212
+ llm_symbol sym;
213
+ size_t len = unicode_len_utf8(text[offs]);
214
+ sym.text = text.c_str() + offs;
215
+ sym.n = std::min(len, text.size() - offs);
216
+ offs += sym.n;
217
+ sym.prev = index - 1;
218
+ sym.next = offs == text.size() ? -1 : index + 1;
219
+ index++;
220
+ symbols.emplace_back(sym);
221
+ }
222
+
223
+ // seed the work queue with all possible 2-character tokens.
224
+ for (int i = 1; i < (int) symbols.size(); ++i) {
225
+ try_add_bigram(i - 1, i);
226
+ }
227
+
228
+ // keep substituting the highest frequency pairs for as long as we can.
229
+ while (!work_queue.empty()) {
230
+ auto bigram = work_queue.top();
231
+ work_queue.pop();
232
+
233
+ auto & left_sym = symbols[bigram.left];
234
+ auto & right_sym = symbols[bigram.right];
235
+
236
+ // if one of the symbols already got merged, skip it.
237
+ if (left_sym.n == 0 || right_sym.n == 0 ||
238
+ left_sym.n + right_sym.n != bigram.size) {
239
+ continue;
240
+ }
241
+
242
+ // merge the right sym into the left one
243
+ left_sym.n += right_sym.n;
244
+ right_sym.n = 0;
245
+
246
+ //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
247
+
248
+ // remove the right sym from the chain
249
+ left_sym.next = right_sym.next;
250
+ if (right_sym.next >= 0) {
251
+ symbols[right_sym.next].prev = bigram.left;
252
+ }
253
+
254
+ // find more substitutions
255
+ try_add_bigram(left_sym.prev, bigram.left);
256
+ try_add_bigram(bigram.left, left_sym.next);
257
+ }
258
+
259
+ for (int i = 0; i != -1; i = symbols[i].next) {
260
+ auto & symbol = symbols[i];
261
+ resegment(symbol, output);
262
+ }
263
+ }
264
+
265
+ private:
266
+ void resegment(llm_symbol & symbol, std::vector<llama_vocab::id> & output) {
267
+ auto text = std::string(symbol.text, symbol.n);
268
+ auto token = vocab.token_to_id.find(text);
269
+
270
+ // Do we need to support is_unused?
271
+ if (token != vocab.token_to_id.end()) {
272
+ output.push_back((*token).second);
273
+ return;
274
+ }
275
+
276
+ const auto p = rev_merge.find(text);
277
+
278
+ if (p == rev_merge.end()) {
279
+ // output any symbols that did not form tokens as bytes.
280
+ output.reserve(output.size() + symbol.n);
281
+ for (int j = 0; j < (int)symbol.n; ++j) {
282
+ llama_vocab::id token_id = llama_byte_to_token_impl(vocab, symbol.text[j]);
283
+ output.push_back(token_id);
284
+ }
285
+ return;
286
+ }
287
+
288
+ resegment(symbols[p->second.first], output);
289
+ resegment(symbols[p->second.second], output);
290
+ }
291
+
292
+ void try_add_bigram(int left, int right) {
293
+ if (left == -1 || right == -1) {
294
+ return;
295
+ }
296
+ const std::string text = std::string(symbols[left].text, symbols[left].n + symbols[right].n);
297
+ auto token = vocab.token_to_id.find(text);
298
+
299
+ if (token == vocab.token_to_id.end()) {
300
+ return;
301
+ }
302
+
303
+ if (static_cast<size_t>((*token).second) >= vocab.id_to_token.size()) {
304
+ return;
305
+ }
306
+
307
+ const auto & tok_data = vocab.id_to_token[(*token).second];
308
+
309
+ llm_bigram_spm bigram;
310
+ bigram.left = left;
311
+ bigram.right = right;
312
+ bigram.score = tok_data.score;
313
+ bigram.size = text.size();
314
+
315
+ work_queue.push(bigram);
316
+
317
+ // Do we need to support is_unused?
318
+ rev_merge[text] = std::make_pair(left, right);
319
+ }
320
+
321
+ const llama_vocab & vocab;
322
+ // currently unused
323
+ // const llm_tokenizer_spm * spm_tokenizer;
324
+
325
+ std::vector<llm_symbol> symbols;
326
+ llm_bigram_spm::queue work_queue;
327
+ std::map<std::string, std::pair<int, int>> rev_merge;
328
+ };
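The comparator above makes the work queue yield the highest-scoring bigram first, breaking ties toward the leftmost pair. A self-contained demonstration of those ordering semantics (toy values):

    #include <cstdio>
    #include <queue>
    #include <vector>

    struct bigram { int left; float score; };
    struct cmp {
        bool operator()(bigram & l, bigram & r) {
            return (l.score < r.score) || (l.score == r.score && l.left > r.left);
        }
    };

    int main() {
        std::priority_queue<bigram, std::vector<bigram>, cmp> q;
        q.push({2, -0.5f});
        q.push({0, -0.5f});
        q.push({1, -0.1f});
        while (!q.empty()) {
            std::printf("left=%d score=%.1f\n", q.top().left, q.top().score);
            q.pop();
        }
        // left=1 score=-0.1   (highest score first)
        // left=0 score=-0.5   (tie broken toward the smaller index)
        // left=2 score=-0.5
    }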
329
+
330
+ //
331
+ // BPE tokenizer
332
+ // adapted from https://github.com/cmp-nct/ggllm.cpp [MIT License]
333
+ // tried to simplify unicode stuff, so most likely does not work 100% correctly!
334
+ //
335
+
336
+ // TODO: there are a lot of common parts between spm and bpe tokenizers, should be refactored and reused
337
+
338
+ template<typename T, typename Container = std::vector<T>, typename Compare = std::less<typename Container::value_type>>
339
+ class llama_priority_queue : public std::priority_queue<T, Container, Compare> {
340
+ public:
341
+ using std::priority_queue<T, Container, Compare>::priority_queue;
342
+
343
+ T pop_move() {
344
+ T item = std::move(this->c.front());
345
+ std::pop_heap(this->c.begin(), this->c.end(), this->comp);
346
+ this->c.pop_back();
347
+ return item;
348
+ }
349
+
350
+ void pop() = delete;
351
+ };
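pop_move() extracts the top element by move rather than copy: the front is moved out first, then std::pop_heap rotates the now moved-from slot to the back, where pop_back() discards it (the base class pop() is deleted to force this path). Hypothetical usage:

    llama_priority_queue<std::string> q;
    q.push("banana");
    q.push("apple");
    std::string top = q.pop_move();   // top == "banana" (max-heap), moved, not copied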
352
+
353
+ struct llm_bigram_bpe {
354
+ struct comparator {
355
+ bool operator()(const llm_bigram_bpe & l, const llm_bigram_bpe & r) const {
356
+ return l.rank > r.rank || (l.rank == r.rank && l.left > r.left);
357
+ }
358
+ };
359
+
360
+ using queue_storage = std::vector<llm_bigram_bpe>;
361
+ using queue = llama_priority_queue<llm_bigram_bpe, queue_storage, comparator>;
362
+ llm_symbol::index left;
363
+ llm_symbol::index right;
364
+ std::string text;
365
+ int rank;
366
+ size_t size;
367
+ };
368
+
369
+ struct llm_tokenizer_bpe : llm_tokenizer {
370
+ llm_tokenizer_bpe(const llama_vocab & vocab) : llm_tokenizer() {
371
+ LM_GGML_ASSERT(vocab.type == LLAMA_VOCAB_TYPE_BPE);
372
+ switch (vocab.type_pre) {
373
+ case LLAMA_VOCAB_PRE_TYPE_LLAMA3:
374
+ regex_exprs = {
375
+ // original regex from tokenizer.json
376
+ //"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
377
+
378
+ // adapted: https://github.com/ggerganov/llama.cpp/pull/6920#issuecomment-2080233989
379
+ "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
380
+ };
381
+ break;
382
+ case LLAMA_VOCAB_PRE_TYPE_DBRX:
383
+ case LLAMA_VOCAB_PRE_TYPE_SMAUG:
384
+ regex_exprs = {
385
+ // same as llama3
386
+ "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
387
+ };
388
+ break;
389
+ case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM:
390
+ regex_exprs = {
391
+ "[\r\n]",
392
+ "\\s?[A-Za-zµÀ-ÖØ-öø-ƺƼ-ƿDŽ-ʓʕ-ʯͰ-ͳͶͷͻ-ͽͿΆΈ-ΊΌΎ-ΡΣ-ϵϷ-ҁҊ-ԯԱ-ՖႠ-ჅᎠ-Ᏽᏸ-ᏽᲐ-ᲺᲽ-Ჿᴀ-ᴫᵫ-ᵷᵹ-ᶚḀ-ἕἘ-Ἕἠ-ὅὈ-Ὅὐ-ὗὙὛὝὟ-ώᾀ-ᾴᾶ-ᾼιῂ-ῄῆ-ῌῐ-ΐῖ-Ίῠ-Ῥῲ-ῴῶ-ῼℂℇℊ-ℓℕℙ-ℝℤΩℨK-ℭℯ-ℴℹℼ-ℿⅅ-ⅉⅎↃↄⰀ-ⱻⱾ-ⳤⳫ-ⳮⳲⳳꙀ-ꙭꚀ-ꚛꜢ-ꝯꝱ-ꞇꞋ-ꞎꭰ-ꮿff-stﬓ-ﬗA-Za-z𐐀-𐑏𐒰-𐓓𐓘-𐓻𐲀-𐲲𐳀-𐳲𑢠-𑣟𞤀-𞥃]+",
393
+ "\\s?[!-/:-~!-/:-~‘-‟ -。]+",
394
+ "\\s+$",
395
+ "[一-龥ࠀ-一가-퟿]+",
396
+ "\\p{N}+",
397
+ };
398
+ break;
399
+ case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER:
400
+ regex_exprs = {
401
+ "[\r\n]",
402
+ "\\s?\\p{L}+",
403
+ "\\s?\\p{P}+",
404
+ "[一-龥ࠀ-一가-퟿]+",
405
+ "\\p{N}",
406
+ };
407
+ break;
408
+ case LLAMA_VOCAB_PRE_TYPE_FALCON:
409
+ regex_exprs = {
410
+ "[\\p{P}\\$\\+<=>\\^~\\|`]+",
411
+ "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
412
+ "[0-9][0-9][0-9]",
413
+ };
414
+ break;
415
+ case LLAMA_VOCAB_PRE_TYPE_STARCODER:
416
+ case LLAMA_VOCAB_PRE_TYPE_REFACT:
417
+ case LLAMA_VOCAB_PRE_TYPE_COMMAND_R:
418
+ case LLAMA_VOCAB_PRE_TYPE_SMOLLM:
419
+ case LLAMA_VOCAB_PRE_TYPE_CODESHELL:
420
+ case LLAMA_VOCAB_PRE_TYPE_EXAONE:
421
+ regex_exprs = {
422
+ "\\p{N}",
423
+ "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
424
+ };
425
+ break;
426
+ case LLAMA_VOCAB_PRE_TYPE_GPT2:
427
+ case LLAMA_VOCAB_PRE_TYPE_MPT:
428
+ case LLAMA_VOCAB_PRE_TYPE_OLMO:
429
+ case LLAMA_VOCAB_PRE_TYPE_JAIS:
430
+ regex_exprs = {
431
+ "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
432
+ };
433
+ break;
434
+ case LLAMA_VOCAB_PRE_TYPE_STABLELM2:
435
+ case LLAMA_VOCAB_PRE_TYPE_QWEN2:
436
+ regex_exprs = {
437
+ // original regex from tokenizer.json
438
+ // "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
439
+ "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
440
+ };
441
+ break;
442
+ case LLAMA_VOCAB_PRE_TYPE_PORO:
443
+ case LLAMA_VOCAB_PRE_TYPE_BLOOM:
444
+ case LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH:
445
+ regex_exprs = {
446
+ " ?[^(\\s|.,!?…。,、।۔،)]+",
447
+ };
448
+ break;
449
+ case LLAMA_VOCAB_PRE_TYPE_CHATGLM4:
450
+ regex_exprs = {
451
+ "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
452
+ };
453
+ break;
454
+ case LLAMA_VOCAB_PRE_TYPE_VIKING:
455
+ regex_exprs = {
456
+ " ?[^(\\s|.,!?…。,、।۔،)]+",
457
+ "\\p{N}",
458
+ };
459
+ break;
460
+ case LLAMA_VOCAB_PRE_TYPE_TEKKEN:
461
+ // original regex from tokenizer.json
462
+ // "[^\\r\\n\\p{L}\\p{N}]?[\\p{Lu}\\p{Lt}\\p{Lm}\\p{Lo}\\p{M}]*[\\p{Ll}\\p{Lm}\\p{Lo}\\p{M}]+|[^\\r\\n\\p{L}\\p{N}]?[\\p{Lu}\\p{Lt}\\p{Lm}\\p{Lo}\\p{M}]+[\\p{Ll}\\p{Lm}\\p{Lo}\\p{M}]*|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
463
+ regex_exprs = {
464
+ "[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))*((?=[\\p{L}])([^A-Z]))+|[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))+((?=[\\p{L}])([^A-Z]))*|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
465
+ };
466
+ break;
467
+ case LLAMA_VOCAB_PRE_TYPE_CHAMELEON:
468
+ // Note: in theory, the special token (sentinel and image token) regex_exprs below
469
+ // are unnecessary, as they are split in `tokenizer_st_partition` anyway.
470
+ // However, since the upstream pre-tokenizer uses them, they are also
471
+ // included here (see https://huggingface.co/facebook/chameleon-7b).
472
+ regex_exprs = {
473
+ "<sentinel:[0-9]+>", // Sentinel tokens
474
+ "(IMGIMG)((A|B|C|D|E|F|G|H|I){1,4})Z", // Image tokens
475
+ "([\\t\\n]| | )", // directly from tokenizer.json
476
+ "\\p{N}", // Individual digits
477
+ "[\\p{P}!-/:-@\\[-`{-~]", // Punctuation, Isolated
478
+ "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
479
+ };
480
+ break;
481
+ default:
482
+ // default regex for BPE tokenization pre-processing
483
+ regex_exprs = {
484
+ "[\\p{P}\\$\\+<=>\\^~\\|]+",
485
+ "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
486
+ "\\p{N}+",
487
+ "[0-9][0-9][0-9]",
488
+ };
489
+ break;
490
+ }
491
+ }
492
+
493
+ std::vector<std::string> regex_exprs;
494
+ };
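Each regex list above drives the pre-tokenization split that unicode_regex_split applies before BPE merging; merges then operate within each chunk independently. As an illustration, the GPT-2-style pattern

    's|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)

splits "I've 42 cats!" into the chunks "I", "'ve", " 42", " cats", "!".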
495
+
496
+ struct llm_tokenizer_bpe_session {
497
+ llm_tokenizer_bpe_session(const llama_vocab & vocab) : vocab(vocab),
498
+ bpe_tokenizer(static_cast<const llm_tokenizer_bpe *>(vocab.tokenizer)) {}
499
+
500
+ static void append(const llama_vocab::id token_id, std::vector<llama_vocab::id> & output) {
501
+ output.push_back(token_id);
502
+ }
503
+
504
+ bool append_bos(std::vector<llama_vocab::id> & output) const {
505
+ if (vocab.tokenizer_add_bos) {
506
+ LM_GGML_ASSERT(vocab.special_bos_id != -1);
507
+ output.push_back(vocab.special_bos_id);
508
+ return true;
509
+ }
510
+ return false;
511
+ }
512
+
513
+ bool append_eos(std::vector<llama_vocab::id> & output) const {
514
+ if (vocab.tokenizer_add_eos) {
515
+ LM_GGML_ASSERT(vocab.special_eos_id != -1);
516
+ output.push_back(vocab.special_eos_id);
517
+ return true;
518
+ }
519
+ return false;
520
+ }
521
+
522
+ void check_double_bos_eos(const std::vector<llama_vocab::id> & output) const {
523
+ if (vocab.tokenizer_add_bos && output.size() >= 2 && output[1] == vocab.special_bos_id) {
524
+ LLAMA_LOG_WARN(
525
+ "%s: Added a BOS token to the prompt as specified by the model but the prompt "
526
+ "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
527
+ "Are you sure this is what you want?\n", __FUNCTION__);
528
+ }
529
+ if (vocab.tokenizer_add_eos && output.size() >= 2 && *(output.end()-2) == vocab.special_eos_id) {
530
+ LLAMA_LOG_WARN(
531
+ "%s: Added an EOS token to the prompt as specified by the model but the prompt "
532
+ "also ends with an EOS token. So now the final prompt ends with 2 EOS tokens. "
533
+ "Are you sure this is what you want?\n", __FUNCTION__);
534
+ }
535
+ }
536
+
537
+ void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
538
+ int final_prev_index = -1;
539
+ const auto word_collection = unicode_regex_split(text, bpe_tokenizer->regex_exprs);
540
+
541
+ symbols_final.clear();
542
+
543
+ for (const auto & word : word_collection) {
544
+ work_queue = llm_bigram_bpe::queue();
545
+ symbols.clear();
546
+
547
+ int index = 0;
548
+ size_t offset = 0;
549
+
550
+ if (vocab.tokenizer_ignore_merges && vocab.token_to_id.find(word) != vocab.token_to_id.end()) {
551
+ symbols.emplace_back(llm_symbol{-1, -1, word.c_str(), word.size()});
552
+ offset = word.size();
553
+ }
554
+
555
+ while (offset < word.size()) {
556
+ llm_symbol sym;
557
+ size_t char_len = std::min(word.size() - offset, (size_t) unicode_len_utf8(word[offset]));
558
+ sym.text = word.c_str() + offset;
559
+ sym.n = char_len;
560
+ offset += sym.n;
561
+ sym.prev = index - 1;
562
+ sym.next = offset == word.size() ? -1 : index + 1;
563
+ index++;
564
+ symbols.emplace_back(sym);
565
+ }
566
+ for (int i = 1; i < (int) symbols.size(); ++i) {
567
+ add_new_bigram(i - 1, i);
568
+ }
569
+
570
+ // build token(s)
571
+ while (!work_queue.empty()) {
572
+ auto bigram = work_queue.pop_move();
573
+
574
+ auto & left_symbol = symbols[bigram.left];
575
+ auto & right_symbol = symbols[bigram.right];
576
+
577
+ if (left_symbol.n == 0 || right_symbol.n == 0) {
578
+ continue;
579
+ }
580
+ std::string left_token = std::string(left_symbol.text, left_symbol.n);
581
+ std::string right_token = std::string(right_symbol.text, right_symbol.n);
582
+ if (left_token + right_token != bigram.text) {
583
+ continue; // Skip this bigram if it's outdated
584
+ }
585
+
586
+ // merge the right sym into the left one
587
+ left_symbol.n += right_symbol.n;
588
+ right_symbol.n = 0;
589
+
590
+ // remove the right sym from the chain
591
+ left_symbol.next = right_symbol.next;
592
+ if (right_symbol.next >= 0) {
593
+ symbols[right_symbol.next].prev = bigram.left;
594
+ }
595
+
596
+ add_new_bigram(left_symbol.prev, bigram.left); // left side of current symbol
597
+ add_new_bigram(bigram.left, left_symbol.next); // right side of current symbol
598
+ }
599
+
600
+ // add the finished tokens to the final list keeping correct order for next and prev
601
+ for (auto & sym : symbols) {
602
+ if (sym.n > 0) {
603
+ sym.prev = final_prev_index;
604
+ sym.next = -1;
605
+ if (final_prev_index != -1) {
606
+ symbols_final[final_prev_index].next = symbols_final.size();
607
+ }
608
+ symbols_final.emplace_back(sym);
609
+ final_prev_index = symbols_final.size() - 1;
610
+ }
611
+ }
612
+ }
613
+
614
+ symbols = symbols_final;
615
+
616
+ if (!symbols.empty()) {
617
+ for (int i = 0; i != -1; i = symbols[i].next) {
618
+ auto & symbol = symbols[i];
619
+ if (symbol.n == 0) {
620
+ continue;
621
+ }
622
+
623
+ const std::string str = std::string(symbol.text, symbol.n);
624
+ const auto token = vocab.token_to_id.find(str);
625
+
626
+ if (token == vocab.token_to_id.end()) {
627
+ for (auto j = str.begin(); j != str.end(); ++j) {
628
+ std::string byte_str(1, *j);
629
+ auto token_multibyte = vocab.token_to_id.find(byte_str);
630
+ if (token_multibyte != vocab.token_to_id.end()) {
631
+ output.push_back(token_multibyte->second);
632
+ }
633
+ }
634
+ } else {
635
+ output.push_back((*token).second);
636
+ }
637
+ }
638
+ }
639
+ }
640
+
641
+ private:
642
+ void add_new_bigram(int left, int right) {
643
+ if (left == -1 || right == -1) {
644
+ return;
645
+ }
646
+ std::string left_token = std::string(symbols[left].text, symbols[left].n);
647
+ std::string right_token = std::string(symbols[right].text, symbols[right].n);
648
+
649
+ int rank_found = -1;
650
+
651
+ rank_found = vocab.find_bpe_rank(left_token, right_token);
652
+
653
+ if (rank_found < 0) {
654
+ return;
655
+ }
656
+
657
+ llm_bigram_bpe bigram;
658
+
659
+ bigram.left = left;
660
+ bigram.right = right;
661
+ bigram.text = left_token + right_token;
662
+ bigram.size = left_token.size() + right_token.size();
663
+ bigram.rank = rank_found;
664
+
665
+ work_queue.push(bigram);
666
+ }
667
+
668
+ const llama_vocab & vocab;
669
+ const llm_tokenizer_bpe * bpe_tokenizer;
670
+
671
+ std::vector<llm_symbol> symbols;
672
+ std::vector<llm_symbol> symbols_final;
673
+ llm_bigram_bpe::queue work_queue;
674
+ };
675
+
676
+ //
677
+ // WPM tokenizer
678
+ //
679
+
680
+ struct llm_tokenizer_wpm : llm_tokenizer {
681
+ llm_tokenizer_wpm(const llama_vocab & /*vocab*/) : llm_tokenizer() {}
682
+ };
683
+
684
+ struct llm_tokenizer_wpm_session {
685
+ llm_tokenizer_wpm_session(const llama_vocab & vocab) : vocab(vocab) {}
686
+
687
+ void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
688
+ const auto & token_map = vocab.token_to_id;
689
+ // normalize and split by whitespace
690
+ std::vector<std::string> words = preprocess(text);
691
+ // bos token prepended already
692
+
693
+ // find the longest tokens that form the words
694
+ for (const std::string & word : words) {
695
+ // skip empty words
696
+ if (word.size() == 0) {
697
+ continue;
698
+ }
699
+
700
+ // prepend phantom space
701
+ const std::string word1 = "\xe2\x96\x81" + word;
702
+ const int n = word1.size();
703
+
704
+ const size_t current_tokens = output.size();
705
+
706
+ // we're at the start of a new word
707
+ // move through character position in word
708
+ for (int i = 0; i < n; ++i) {
709
+ // loop through possible match length
710
+ bool match = false;
711
+ for (int j = std::min(n, i + vocab.max_token_len + 1); j > i; j--) {
712
+ auto it = token_map.find(word1.substr(i, j - i));
713
+ if (it != token_map.end()) {
714
+ output.push_back(it->second);
715
+ match = true;
716
+ i = j - 1;
717
+ break;
718
+ }
719
+ }
720
+
721
+ if (!match) { // discard all
722
+ output.resize(current_tokens);
723
+ break; // and discard next tokens
724
+ }
725
+ }
726
+
727
+ // we didn't find any matches for this word
728
+ if (current_tokens == output.size()) {
729
+ output.push_back(vocab.special_unk_id);
730
+ }
731
+ }
732
+ }
733
+
734
+ // TODO: reduce string copies by using cpts_offs array
735
+ static std::vector<std::string> preprocess(const std::string & text) {
736
+ const std::vector<uint32_t> cpts_nfd = unicode_cpts_normalize_nfd(unicode_cpts_from_utf8(text));
737
+ std::vector<std::string> words(1, "");
738
+
739
+ for (const uint32_t cpt : cpts_nfd) {
740
+ const auto flags = unicode_cpt_flags(cpt);
741
+
742
+ if (flags.is_whitespace) {
743
+ if (words.back().size()) { // finish previous word if any
744
+ words.emplace_back();
745
+ }
746
+ continue;
747
+ }
748
+
749
+ assert (!flags.is_separator);
750
+ if (cpt == 0 || cpt == 0xFFFD || flags.is_control) {
751
+ continue;
752
+ }
753
+
754
+ const std::string s = unicode_cpt_to_utf8(unicode_tolower(cpt));
755
+ if (flags.is_punctuation || ( cpt < 0x7F && flags.is_symbol ) || is_chinese_char(cpt)) {
756
+ if (words.back().size()) { // finish previous word if any
757
+ words.emplace_back();
758
+ }
759
+ words.back() = s; // single char word
760
+ words.emplace_back(); // start a new word
761
+ } else {
762
+ words.back() += s; // append char to word
763
+ }
764
+ }
765
+
766
+ if (!words.back().size()) {
767
+ words.pop_back();
768
+ }
769
+
770
+ return words;
771
+ }
772
+
773
+ static bool is_chinese_char(uint32_t cpt) {
774
+ return
775
+ (cpt >= 0x04E00 && cpt <= 0x09FFF) ||
776
+ (cpt >= 0x03400 && cpt <= 0x04DBF) ||
777
+ (cpt >= 0x20000 && cpt <= 0x2A6DF) ||
778
+ (cpt >= 0x2A700 && cpt <= 0x2B73F) ||
779
+ (cpt >= 0x2B740 && cpt <= 0x2B81F) ||
780
+ (cpt >= 0x2B920 && cpt <= 0x2CEAF) || // this should be 0x2B820 but in hf rust code it is 0x2B920
781
+ (cpt >= 0x0F900 && cpt <= 0x0FAFF) ||
782
+ (cpt >= 0x2F800 && cpt <= 0x2FA1F);
783
+ //(cpt >= 0x3000 && cpt <= 0x303F) ||
784
+ //(cpt >= 0xFF00 && cpt <= 0xFFEF);
785
+ }
786
+
787
+ private:
788
+ const llama_vocab & vocab;
789
+ // currently unused
790
+ // const llm_tokenizer_wpm * wpm_tokenizer;
791
+ };
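The word loop above is a greedy longest-match against the vocabulary, bounded by vocab.max_token_len, and it discards the whole word (emitting the unknown token) as soon as any position fails to match. A simplified standalone sketch over a std::set vocabulary (raw substrings only; the real code also handles the phantom-space prefix):

    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
        const std::set<std::string> vocab = { "un", "unbeliev", "able" };
        const std::string word = "unbelievable";

        std::vector<std::string> pieces;
        size_t i = 0;
        while (i < word.size()) {
            bool match = false;
            for (size_t j = word.size(); j > i; j--) {    // longest candidate first
                if (vocab.count(word.substr(i, j - i))) {
                    pieces.push_back(word.substr(i, j - i));
                    i = j;
                    match = true;
                    break;
                }
            }
            if (!match) { pieces.clear(); break; }        // no match: discard the word
        }
        for (const auto & p : pieces) {
            std::cout << p << '\n';                       // unbeliev, then able
        }
    }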
792
+
793
+ //
794
+ // UGM tokenizer
795
+ //
796
+
797
+ struct llm_tokenizer_ugm : llm_tokenizer {
798
+ llm_tokenizer_ugm(const llama_vocab & vocab) : llm_tokenizer() {
799
+ if (vocab.precompiled_charsmap.size() > 0) {
800
+ size_t charsmap_offset = 0;
801
+
802
+ // First four bytes of precompiled_charsmap contains length of binary
803
+ // blob containing XOR-compressed compact double array (XCDA) entries
804
+ uint32_t xcda_blob_size = *(const uint32_t *) &vocab.precompiled_charsmap[0];
805
+ charsmap_offset += sizeof(xcda_blob_size);
806
+ if (xcda_blob_size + charsmap_offset >= vocab.precompiled_charsmap.size()) {
807
+ throw std::runtime_error("Index out of array bounds in precompiled charsmap!");
808
+ }
809
+
810
+ // Next xcda_blob_size bytes contain entries of XOR-compressed compact
811
+ // double array (XCDA). Each entry is bit-packed into a 32-bit integer.
812
+ xcda_array = (const uint32_t *) &vocab.precompiled_charsmap[charsmap_offset];
813
+ xcda_array_size = xcda_blob_size / sizeof(uint32_t);
814
+ charsmap_offset += xcda_blob_size;
815
+
816
+ // Remaining bytes of precompiled charsmap contain null-terminated
817
+ // replacement strings for prefixes matched by the XCDA.
818
+ prefix_replacements = &vocab.precompiled_charsmap[charsmap_offset];
819
+ prefix_replacements_size = vocab.precompiled_charsmap.size() - charsmap_offset;
820
+ }
821
+
822
+ for (unsigned int id = 0; id < vocab.id_to_token.size(); ++id) {
823
+ const auto &token_data = vocab.id_to_token[id];
824
+
825
+ if (llama_is_normal_token(vocab, id)) {
826
+ min_score = std::min<float>(min_score, token_data.score);
827
+ max_score = std::max<float>(max_score, token_data.score);
828
+ }
829
+
830
+ if (llama_is_normal_token(vocab, id) ||
831
+ llama_is_user_defined_token(vocab, id) ||
832
+ llama_is_unused_token(vocab, id)) {
833
+ token_matcher.insert(token_data.text.data(), token_data.text.size(), id);
834
+ }
835
+
836
+ if (llama_is_user_defined_token(vocab, id)) {
837
+ user_defined_token_matcher.insert(token_data.text.data(), token_data.text.size());
838
+ }
839
+ }
840
+
841
+ unknown_token_score = min_score - unknown_token_score_penalty;
842
+ }
843
+
844
+ // escaped space symbol - U+2581 (Lower One Eighth Block)
845
+ const std::string escaped_space = "\xE2\x96\x81";
846
+
847
+ const char * prefix_replacements = NULL;
848
+ size_t prefix_replacements_size = 0;
849
+
850
+ const uint32_t * xcda_array = NULL;
851
+ size_t xcda_array_size = 0;
852
+
853
+ struct naive_trie user_defined_token_matcher;
854
+
855
+ float min_score = FLT_MAX;
856
+ float max_score = -FLT_MAX;
857
+
858
+ float unknown_token_score_penalty = 10.0;
859
+ float unknown_token_score;
860
+
861
+ struct naive_trie token_matcher;
862
+ };
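For reference, a compact restatement of the precompiled_charsmap layout parsed by the constructor above (offsets in bytes, values read in native byte order):

    [0 .. 3]                     uint32_t   xcda_blob_size
    [4 .. 4 + xcda_blob_size)    uint32_t[] bit-packed XCDA entries
    [4 + xcda_blob_size .. end)  char[]     NUL-terminated replacement strings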
863
+
864
+ struct llm_tokenizer_ugm_session {
865
+ llm_tokenizer_ugm_session(const llama_vocab & vocab) : vocab(vocab),
866
+ ugm_tokenizer(static_cast<const llm_tokenizer_ugm *>(vocab.tokenizer)) {}
867
+
868
+ /* This implementation is based on SentencePiece optimized Viterbi algorithm for
869
+ * unigram language models. The general idea is to:
870
+ * - move along the input sequence in steps of one UTF code point,
871
+ * - at each step find all possible tokenizations of the prefix by
872
+ * traversing the tokens trie,
873
+ * - for each tokenization store the best one so far (by higher score)
874
+ * - use the position in sequence after given token as an index to store
875
+ * results
876
+ * - if there was no valid tokenization of the current UTF code point
877
+ * then use unknown token with additional score penalty
878
+ * After processing the whole sequence we backtrack from the end to get
879
+ * the best tokenization.
880
+ */
881
+ void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
882
+ // get current size of output (for reversal later)
883
+ size_t output_size = output.size();
884
+
885
+ // normalize the input first
886
+ std::string normalized;
887
+ normalize(text, &normalized);
888
+ size_t input_len = normalized.size();
889
+ if (input_len == 0) {
890
+ return;
891
+ }
892
+
893
+ // initialize score_sum to -FLT_MAX so it will be always lower than sums of token scores
894
+ std::vector<struct best_tokenization> tokenization_results(input_len + 1, {vocab.special_unk_id, 0, -FLT_MAX});
895
+ // at the beginning tokenization score is zero
896
+ tokenization_results[0] = { vocab.special_unk_id, 0, 0 };
897
+
898
+ for (size_t input_offset = 0; input_offset < input_len;) {
899
+ size_t prefix_offset = input_offset;
900
+ // calculate how many code units are in the currently processed UTF code point
901
+ size_t n_utf8_code_units = std::min<size_t>(unicode_len_utf8(normalized[input_offset]), input_len - input_offset);
902
+
903
+ // traverse the token matcher trie to find a matching token
904
+ bool single_codepoint_token_found = false;
905
+ const struct best_tokenization & current_best = tokenization_results[input_offset];
906
+ const struct naive_trie * node = ugm_tokenizer->token_matcher.traverse(normalized[prefix_offset++]);
907
+
908
+ while (prefix_offset <= input_len && node != NULL) {
909
+ // check if we found valid token in prefix
910
+ if (node->has_value) {
911
+ // check if it corresponds to the whole UTF code point
912
+ if (prefix_offset - input_offset == n_utf8_code_units) {
913
+ single_codepoint_token_found = true;
914
+ }
915
+ llama_token token_id = node->value;
916
+ const auto & token_data = vocab.id_to_token[token_id];
917
+
918
+ // we set the user-defined token scores to 0 to make them more likely to be selected
919
+ // (normal token scores are log probabilities, so they are negative)
920
+ // score type is double here to make tokenization results exactly
921
+ // the same as in the HF tokenizer using SentencePiece
922
+ const double token_score = llama_is_user_defined_token(vocab, token_id) ? 0.0 : token_data.score;
923
+ const double challenger_score = current_best.score_sum + token_score;
924
+ struct best_tokenization & current_champ = tokenization_results[prefix_offset];
925
+ if (challenger_score > current_champ.score_sum) {
926
+ struct best_tokenization challenger = { token_id, input_offset, (float) challenger_score };
927
+ current_champ = challenger;
928
+ }
929
+ }
930
+ node = node->traverse(normalized[prefix_offset++]);
931
+ }
932
+
933
+ // if we didn't find a valid token corresponding to the whole UTF code point
934
+ // then use unknown token as the tokenization of this UTF code point
935
+ if (!single_codepoint_token_found) {
936
+ const double challenger_score = current_best.score_sum + ugm_tokenizer->unknown_token_score;
937
+ prefix_offset = input_offset + n_utf8_code_units;
938
+ struct best_tokenization & current_champ = tokenization_results[prefix_offset];
939
+ if (challenger_score > current_champ.score_sum) {
940
+ struct best_tokenization challenger = { vocab.special_unk_id, input_offset, (float) challenger_score };
941
+ current_champ = challenger;
942
+ }
943
+ }
944
+
945
+ // move to the next UTF code point
946
+ input_offset += n_utf8_code_units;
947
+ }
948
+
949
+ // now backtrack from the end to gather token ids of the best tokenization
950
+ // merge sequences of consecutive unknown tokens into single unknown tokens
951
+ bool is_prev_unknown = false;
952
+ for (struct best_tokenization & tokenization = tokenization_results[input_len]; ; tokenization = tokenization_results[tokenization.input_offset]) {
953
+ bool is_unknown = tokenization.token_id == vocab.special_unk_id;
954
+ if (!(is_prev_unknown && is_unknown)) {
955
+ output.push_back(tokenization.token_id);
956
+ }
957
+ if (tokenization.input_offset == 0) {
958
+ break;
959
+ }
960
+ is_prev_unknown = is_unknown;
961
+ }
962
+
963
+ // reverse the output since we added tokens starting from the end of the input
964
+ std::reverse(output.begin() + output_size, output.end());
965
+ }
966
+
967
+ private:
968
+
969
+ // helper structure for returning normalization results
970
+ struct normalization_result {
971
+ const char * normalized;
972
+ size_t normalized_len;
973
+ size_t consumed_input;
974
+ };
975
+
976
+ void normalize(const std::string& input, std::string * normalized) {
977
+ normalized->clear();
978
+ normalized->reserve(input.size() * 3);
979
+
980
+ const std::string space = vocab.tokenizer_escape_whitespaces ? ugm_tokenizer->escaped_space : " ";
981
+
982
+ bool shall_prepend_space = !vocab.tokenizer_treat_whitespace_as_suffix && vocab.tokenizer_add_space_prefix;
983
+ bool shall_append_space = vocab.tokenizer_treat_whitespace_as_suffix && vocab.tokenizer_add_space_prefix;
984
+ bool shall_merge_spaces = vocab.tokenizer_remove_extra_whitespaces;
985
+
986
+ bool is_space_prepended = false;
987
+ bool processing_non_ws = false;
988
+
989
+ size_t input_len = input.size();
990
+
991
+ for (size_t input_offset = 0; input_offset < input_len; ) {
992
+ auto norm_res = normalize_prefix(input, input_offset);
993
+ for (size_t i = 0; i < norm_res.normalized_len; i++) {
994
+ char c = norm_res.normalized[i];
995
+ if (c != ' ') {
996
+ if (!processing_non_ws) {
997
+ processing_non_ws = true;
998
+ if ((shall_prepend_space && !is_space_prepended) || shall_merge_spaces) {
999
+ normalized->append(space);
1000
+ is_space_prepended = true;
1001
+ }
1002
+ }
1003
+ normalized->push_back(c);
1004
+ } else {
1005
+ if (processing_non_ws) {
1006
+ processing_non_ws = false;
1007
+ }
1008
+ if (!shall_merge_spaces) {
1009
+ normalized->append(space);
1010
+ }
1011
+ }
1012
+ }
1013
+
1014
+ input_offset += norm_res.consumed_input;
1015
+ }
1016
+
1017
+ if (shall_append_space) {
1018
+ normalized->append(space);
1019
+ }
1020
+ }
1021
+
1022
+ /*
1023
+ * This structure is a view wrapper for XOR-compressed double array (XCDA)
1024
+ * See Shunsuke Kanda (2018). Space- and Time-Efficient String Dictionaries.
1025
+ * Each bit-packed entry contains:
1026
+ * - BASE array value in bits 10-30
1027
+ * - LCHECK array value in bits 0-7
1028
+ * - LEAF array value in bit 8 (bit 9, when set, applies an extra left shift of 8 to BASE)
1029
+ * Entries containing indexes of replacement sequences have bit 31 set
1030
+ */
1031
+ struct xcda_array_view {
1032
+ public:
1033
+ xcda_array_view(const uint32_t * xcda_array, size_t xcda_array_size) : xcda_array(xcda_array), xcda_array_size(xcda_array_size) {
1034
+ }
1035
+ uint32_t get_base(size_t index) {
1036
+ uint32_t packed_node = get_node(index);
1037
+ return (packed_node >> 10) << ((packed_node & (1U << 9)) >> 6);
1038
+ }
1039
+ uint32_t get_lcheck(size_t index) {
1040
+ uint32_t packed_node = get_node(index);
1041
+ return packed_node & ((1U << 31) | 0xff);
1042
+ }
1043
+ bool get_leaf(size_t index) {
1044
+ uint32_t packed_node = get_node(index);
1045
+ return (packed_node >> 8) & 1;
1046
+ }
1047
+ uint32_t get_value(size_t index) {
1048
+ uint32_t packed_node = get_node(index);
1049
+ return packed_node & ((1U << 31) - 1);
1050
+ }
1051
+ private:
1052
+ uint32_t get_node(size_t index) {
1053
+ if (index >= xcda_array_size) {
1054
+ throw std::runtime_error("Index out of array bounds in XCDA array!");
1055
+ }
1056
+ return xcda_array[index];
1057
+ }
1058
+ const uint32_t * xcda_array;
1059
+ size_t xcda_array_size;
1060
+ };
1061
+
1062
+ // this structure stores the best tokenization so far at input_offset
1063
+ struct best_tokenization {
1064
+ llama_token token_id;
1065
+ size_t input_offset;
1066
+ float score_sum;
1067
+ };
1068
+
1069
+ struct normalization_result normalize_prefix(const std::string & input, size_t input_offset) {
1070
+ if (input_offset == input.size()) {
1071
+ return { &input[input_offset], 0, 0 };
1072
+ }
1073
+
1074
+ // if input prefix matches some user-defined token return this token as normalization result
1075
+ auto user_defined_token_match =
1076
+ ugm_tokenizer->user_defined_token_matcher.get_longest_prefix(&input[input_offset], input.size() - input_offset);
1077
+ if (user_defined_token_match.second > 0) {
1078
+ return { &input[input_offset], user_defined_token_match.second, user_defined_token_match.second };
1079
+ }
1080
+
1081
+ size_t longest_prefix_length = 0;
1082
+ size_t longest_prefix_offset = 0;
1083
+
1084
+ if (ugm_tokenizer->xcda_array_size > 0) {
1085
+ struct xcda_array_view xcda_view(ugm_tokenizer->xcda_array, ugm_tokenizer->xcda_array_size);
1086
+
1087
+ // Find the longest normalized sequence matching the input prefix by walking
1088
+ // the XOR-compressed compact double array (XCDA) starting from the root node
1089
+ // We find the index of the next node by calculating BASE[s] ^ c where s is
1090
+ // the index of the previous node and c is a numerical character value
1091
+ uint32_t node_index = 0;
1092
+ // get BASE of the root node
1093
+ node_index = xcda_view.get_base(node_index);
1094
+ for (size_t prefix_offset = input_offset; prefix_offset < input.size(); prefix_offset++) {
1095
+ unsigned char c = input[prefix_offset];
1096
+ if (c == 0) {
1097
+ break;
1098
+ }
1099
+ node_index ^= c;
1100
+ // if value of LCHECK is not c it means that this is not a child of
1101
+ // the previous node, so we stop matching
1102
+ if (xcda_view.get_lcheck(node_index) != c) {
1103
+ break;
1104
+ }
1105
+ bool is_leaf = xcda_view.get_leaf(node_index);
1106
+ // get BASE of the current node
1107
+ node_index ^= xcda_view.get_base(node_index);
1108
+ // if LEAF of the current node is true, it means that its BASE points to the node
1109
+ // containing index of replacement sequence for currently matched input prefix
1110
+ if (is_leaf)
1111
+ {
1112
+ longest_prefix_length = prefix_offset - input_offset + 1;
1113
+ // get index of replacement sequence for currently matched input prefix
1114
+ longest_prefix_offset = xcda_view.get_value(node_index);
1115
+ }
1116
+ }
1117
+ }
1118
+
1119
+ if (longest_prefix_length > 0) {
1120
+ // we have a match, so return the replacement sequence
1121
+ if (longest_prefix_offset >= ugm_tokenizer->prefix_replacements_size) {
1122
+ throw std::runtime_error("Index out of array bounds in precompiled charsmap!");
1123
+ }
1124
+ const char * prefix_replacement = &(ugm_tokenizer->prefix_replacements)[longest_prefix_offset];
1125
+ return { prefix_replacement, strlen(prefix_replacement), longest_prefix_length };
1126
+ }
1127
+
1128
+ // check if the input prefix contains a valid sequence of UTF-8 code units
1129
+ try {
1130
+ // if yes, return this sequence unmodified
1131
+ size_t prefix_offset = input_offset;
1132
+ unicode_cpt_from_utf8(input, prefix_offset);
1133
+ return { &input[input_offset], prefix_offset - input_offset, prefix_offset - input_offset };
1134
+ } catch (std::invalid_argument & /*ex*/) {
1135
+ // if no, consume 1 byte and return U+FFFD - REPLACEMENT CHARACTER
1136
+ return { "\xEF\xBF\xBD", 3, 1 };
1137
+ }
1138
+ }
1139
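The walk above is easier to trace on a hand-built toy. The array below (hypothetical packed values, not the precompiled charsmap format; the bit-9 scale flag is left clear for simplicity) encodes a single mapping, the one-byte prefix "a" -> replacement-sequence index 0:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
        uint32_t xcda[99];
        memset(xcda, 0, sizeof(xcda));
        // root node 0 has BASE = 0, so the candidate child for 'a' is 0 ^ 'a' == 97
        xcda[97] = (3u << 10)   // BASE = 3: the value node lives at 97 ^ 3 == 98
                 | (1u << 8)    // LEAF: a replacement sequence ends at this node
                 | 'a';         // LCHECK: proves 97 really is a child of the root
        xcda[98] = (1u << 31) | 0u;  // bit 31 set: payload is replacement index 0

        uint32_t node = 0;
        node  = xcda[node] >> 10;            // BASE of the root
        node ^= (unsigned char) 'a';         // candidate child index: 97
        assert((xcda[node] & 0xff) == 'a');  // LCHECK matches -> valid transition
        assert((xcda[node] >> 8) & 1);       // LEAF set -> a match ends here
        node ^= xcda[node] >> 10;            // follow BASE to the value node: 98
        assert((xcda[node] & ~(1u << 31)) == 0);  // replacement index 0
        return 0;
    }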
+
1140
+ const llama_vocab & vocab;
1141
+ const llm_tokenizer_ugm * ugm_tokenizer;
1142
+ };
1143
+
1144
+ //
1145
+ // RWKV tokenizer
1146
+ //
1147
+
1148
+ static std::vector<uint8_t> llama_unescape_rwkv_token(const std::string & escaped) {
1149
+ std::vector<uint8_t> output;
1150
+ output.reserve(escaped.size());
1151
+
1152
+ // Parser state
1153
+ bool escaping = false;
1154
+ uint8_t hex_remaining = 0;
1155
+ uint8_t hex_acc = 0;
1156
+
1157
+ // Step through characters, performing parsing
1158
+ for (const char & c : escaped) {
1159
+ // If we're parsing a hex code, interpret the next character
1160
+ if (hex_remaining != 0) {
1161
+ uint8_t value = (c >= 'a') ? (c - 'a' + 10) : (c - '0');
1162
+ hex_acc = (hex_acc << 4) + value;
1163
+
1164
+ hex_remaining -= 1;
1165
+ if (hex_remaining == 0) {
1166
+ output.push_back(hex_acc);
1167
+ hex_acc = 0;
1168
+ }
1169
+
1170
+ continue;
1171
+ }
1172
+
1173
+ // If we got an escape character, interpret it
1174
+ if (escaping) {
1175
+ if (c == 't') {
1176
+ output.push_back('\t');
1177
+ } else if (c == 'n') {
1178
+ output.push_back('\n');
1179
+ } else if (c == 'r') {
1180
+ output.push_back('\r');
1181
+ } else if (c == 'x') {
1182
+ hex_remaining = 2;
1183
+ } else {
1184
+ output.push_back(c);
1185
+ }
1186
+
1187
+ escaping = false;
1188
+ continue;
1189
+ }
1190
+
1191
+ if (c == '\\') {
1192
+ escaping = true;
1193
+ continue;
1194
+ }
1195
+
1196
+ output.push_back(c);
1197
+ }
1198
+
1199
+ return output;
1200
+ }
1201
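For instance, the escape grammar above maps the vocab string "\\x41B\\n" (a C++ literal, i.e. the seven characters \x41B\n) to the three raw bytes {0x41, 'B', 0x0A}. An illustrative check against the function above:

    #include <cassert>

    static void rwkv_unescape_demo() {
        const std::vector<uint8_t> bytes = llama_unescape_rwkv_token("\\x41B\\n");
        assert(bytes.size() == 3);
        assert(bytes[0] == 0x41 && bytes[1] == 'B' && bytes[2] == '\n');
    }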
+
1202
+ struct llm_tokenizer_rwkv : llm_tokenizer {
1203
+ llm_tokenizer_rwkv(const llama_vocab & vocab) : llm_tokenizer() {
1204
+ // RWKV supports arbitrary byte tokens, but the vocab struct only supports string tokens.
1205
+ // For now, we decode the vocab here into the lookup we'll use for tokenization.
1206
+
1207
+ // build trie
1208
+ for (unsigned int id = 0; id < vocab.id_to_token.size(); ++id) {
1209
+ const auto & token = vocab.id_to_token[id];
1210
+ const auto data = llama_unescape_rwkv_token(token.text);
1211
+ token_matcher.insert((const char *) data.data(), data.size(), id);
1212
+ }
1213
+ }
1214
+
1215
+ struct naive_trie token_matcher;
1216
+ };
1217
+
1218
+ struct llm_tokenizer_rwkv_session {
1219
+ llm_tokenizer_rwkv_session(const llama_vocab & vocab) : vocab(vocab),
1220
+ rwkv_tokenizer(static_cast<const llm_tokenizer_rwkv &>(*vocab.tokenizer)) {}
1221
+
1222
+ void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
1223
+ uint32_t position = 0;
1224
+ while (position < text.size()) {
1225
+ const struct naive_trie * node = rwkv_tokenizer.token_matcher.traverse(text[position]);
1226
+ if (node == NULL) {
1227
+ // no matching token found, add unknown token
1228
+ output.push_back(vocab.special_unk_id);
1229
+ position += 1;
1230
+ continue;
1231
+ }
1232
+
1233
+ // traverse the trie to find the longest matching token
1234
+ uint32_t token_id = 0;
1235
+ uint32_t token_length = 0;
1236
+ while (node != NULL) {
1237
+ if (node->has_value) {
1238
+ token_id = node->value;
1239
+ token_length = position + 1;
1240
+ }
1241
+ node = node->traverse(text[++position]);
1242
+ }
1243
+
1244
+ // add the longest matching token
1245
+ output.push_back(token_id);
1246
+ position = token_length;
1247
+ }
1248
+ }
1249
+
1250
+ private:
1251
+ const llama_vocab & vocab;
1252
+ const llm_tokenizer_rwkv & rwkv_tokenizer;
1253
+ };
1254
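The tokenize() loop above performs greedy longest-match over the trie: it remembers the last node that carried a token id and, when the path breaks, emits that token and resumes after it (or emits unk and consumes one byte on no match at all). For reference, a brute-force stand-in with the same observable semantics (greedy_tokenize is a hypothetical helper; the trie version avoids re-scanning prefixes and the substr allocations):

    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    static std::vector<int32_t> greedy_tokenize(
            const std::string & text,
            const std::map<std::string, int32_t> & vocab,  // token text -> id
            int32_t unk_id) {
        std::vector<int32_t> out;
        size_t pos = 0;
        while (pos < text.size()) {
            int32_t best_id  = unk_id;
            size_t  best_len = 1;  // on no match, emit unk and consume one byte
            // keep the longest prefix starting at pos that is in the vocab
            for (size_t len = 1; pos + len <= text.size(); ++len) {
                const auto it = vocab.find(text.substr(pos, len));
                if (it != vocab.end()) {
                    best_id  = it->second;
                    best_len = len;
                }
            }
            out.push_back(best_id);
            pos += best_len;
        }
        return out;
    }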
+
1255
+ void llama_vocab::init_tokenizer() {
1256
+ switch (type) {
1257
+ case LLAMA_VOCAB_TYPE_SPM:
1258
+ tokenizer = new llm_tokenizer_spm(*this);
1259
+ break;
1260
+ case LLAMA_VOCAB_TYPE_BPE:
1261
+ tokenizer = new llm_tokenizer_bpe(*this);
1262
+ break;
1263
+ case LLAMA_VOCAB_TYPE_WPM:
1264
+ tokenizer = new llm_tokenizer_wpm(*this);
1265
+ break;
1266
+ case LLAMA_VOCAB_TYPE_UGM:
1267
+ tokenizer = new llm_tokenizer_ugm(*this);
1268
+ break;
1269
+ case LLAMA_VOCAB_TYPE_RWKV:
1270
+ tokenizer = new llm_tokenizer_rwkv(*this);
1271
+ break;
1272
+ default:
1273
+ LM_GGML_ABORT("unsupported vocab type");
1274
+ }
1275
+ }
1276
+
1277
+ //
1278
+ // (de-) tokenize
1279
+ //
1280
+
1281
+ typedef enum FRAGMENT_BUFFER_VARIANT_TYPE {
1282
+ FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN,
1283
+ FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT
1284
+ } FRAGMENT_BUFFER_VARIANT_TYPE;
1285
+
1286
+ struct fragment_buffer_variant {
1287
+ fragment_buffer_variant(llama_vocab::id _token)
1288
+ :
1289
+ type(FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN),
1290
+ token(_token),
1291
+ raw_text(_dummy),
1292
+ offset(0),
1293
+ length(0) {}
1294
+
1295
+ fragment_buffer_variant(const std::string & _raw_text, int64_t _offset, int64_t _length)
1296
+ :
1297
+ type(FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT),
1298
+ token((llama_vocab::id) - 1),
1299
+ raw_text(_raw_text),
1300
+ offset(_offset),
1301
+ length(_length){
1302
+ LM_GGML_ASSERT(_offset >= 0);
1303
+ LM_GGML_ASSERT(_length >= 1);
1304
+ LM_GGML_ASSERT(offset + length <= raw_text.length());
1305
+ }
1306
+
1307
+ const FRAGMENT_BUFFER_VARIANT_TYPE type;
1308
+ const llama_vocab::id token;
1309
+ const std::string _dummy;
1310
+ const std::string & raw_text;
1311
+ const uint64_t offset;
1312
+ const uint64_t length;
1313
+ };
1314
+
1315
+ // #define PRETOKENIZERDEBUG
1316
+
1317
+ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer, bool parse_special) {
1318
+ // for each special token
1319
+ for (const llama_vocab::id special_id : vocab.cache_special_tokens) {
1320
+ const auto & data = vocab.id_to_token[special_id];
1321
+ const auto & special_token = data.text;
1322
+
1323
+ if (!parse_special && (data.attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_UNKNOWN))) {
1324
+ // Ignore control and unknown tokens when parse_special == false
1325
+ continue;
1326
+ // User-defined tokens are still pre-tokenized before everything else
1327
+ // ref: https://github.com/huggingface/tokenizers/blob/fdd26ba9a3f0c133427aab0423888cbde91362d7/tokenizers/src/tokenizer/mod.rs#L726
1328
+ // This is mostly relevant for neox-style tokenizers (mpt, olmo, stablelm, etc.)
1329
+ }
1330
+
1331
+ // for each text fragment
1332
+ std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
1333
+ while (it != buffer.end()) {
1334
+ auto & fragment = (*it);
1335
+
1336
+ // if a fragment is text ( not yet processed )
1337
+ if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
1338
+ const auto & raw_text = fragment.raw_text;
1339
+
1340
+ auto raw_text_base_offset = fragment.offset;
1341
+ auto raw_text_base_length = fragment.length;
1342
+
1343
+ // loop over the text
1344
+ while (true) {
1345
+ // find the first occurrence of a given special token in this fragment
1346
+ // passing the offset argument only limits the "search area" but match coordinates
1347
+ // are still relative to the full source raw_text
1348
+ auto match = raw_text.find(special_token, raw_text_base_offset);
1349
+
1350
+ // no occurrences found, stop processing this fragment for a given special token
1351
+ if (match == std::string::npos) break;
1352
+
1353
+ // check if match is within bounds of offset <-> length
1354
+ if (match + special_token.length() > raw_text_base_offset + raw_text_base_length) break;
1355
+
1356
+ #ifdef PRETOKENIZERDEBUG
1357
+ LLAMA_LOG_WARN("FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
1358
+ #endif
1359
+ auto source = std::distance(buffer.begin(), it);
1360
+
1361
+ // if match is further than base offset
1362
+ // then we have some text to the left of it
1363
+ if (match > raw_text_base_offset) {
1364
+ // left
1365
+ const int64_t left_reminder_offset = raw_text_base_offset + 0;
1366
+ int64_t left_reminder_length = match - raw_text_base_offset;
1367
+
1368
+ if (data.attr & LLAMA_TOKEN_ATTR_LSTRIP) {
1369
+ while (left_reminder_length > 0 && isspace(raw_text[left_reminder_offset + left_reminder_length - 1])) {
1370
+ left_reminder_length--;
1371
+ }
1372
+ }
1373
+
1374
+ if (left_reminder_length > 0) {
1375
+ buffer.emplace_after(it, raw_text, left_reminder_offset, left_reminder_length);
1376
+ it++;
1377
+ }
1378
+
1379
+ #ifdef PRETOKENIZERDEBUG
1380
+ LLAMA_LOG_WARN("FL: (%ld %ld) '%s'\n", left_reminder_offset, left_reminder_length, raw_text->substr(left_reminder_offset, left_reminder_length).c_str());
1381
+ #endif
1382
+ }
1383
+
1384
+ // special token
1385
+ buffer.emplace_after(it, special_id);
1386
+ it++;
1387
+
1388
+ // right
1389
+ if (match + special_token.length() < raw_text_base_offset + raw_text_base_length) {
1390
+ int64_t right_reminder_offset = match + special_token.length();
1391
+ int64_t right_reminder_length = raw_text_base_length - ((match - raw_text_base_offset) + special_token.length());
1392
+
1393
+ if (data.attr & LLAMA_TOKEN_ATTR_RSTRIP) {
1394
+ while (right_reminder_length > 0 && isspace(raw_text[right_reminder_offset])) {
1395
+ right_reminder_offset++;
1396
+ right_reminder_length--;
1397
+ }
1398
+ }
1399
+
1400
+ if (right_reminder_length > 0) {
1401
+ buffer.emplace_after(it, raw_text, right_reminder_offset, right_reminder_length);
1402
+ it++;
1403
+ }
1404
+
1405
+ #ifdef PRETOKENIZERDEBUG
1406
+ LLAMA_LOG_WARN("FR: (%ld %ld) '%s'\n", right_reminder_offset, right_reminder_length, raw_text->substr(right_reminder_offset, right_reminder_length).c_str());
1407
+ #endif
1408
+
1409
+ if (source == 0) {
1410
+ buffer.erase_after(buffer.before_begin());
1411
+ } else {
1412
+ buffer.erase_after(std::next(buffer.begin(), (source-1)));
1413
+ }
1414
+
1415
+ // repeat for the right side
1416
+ raw_text_base_offset = right_reminder_offset;
1417
+ raw_text_base_length = right_reminder_length;
1418
+
1419
+ #ifdef PRETOKENIZERDEBUG
1420
+ LLAMA_LOG_WARN("RR: (%ld %ld) '%s'\n", raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
1421
+ #endif
1422
+ } else {
1423
+ if (source == 0) {
1424
+ buffer.erase_after(buffer.before_begin());
1425
+ } else {
1426
+ buffer.erase_after(std::next(buffer.begin(), (source-1)));
1427
+ }
1428
+ break;
1429
+ }
1430
+ }
1431
+ }
1432
+ it++;
1433
+ }
1434
+ }
1435
+ }
1436
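As a worked example: with parse_special == true and a special token "<eos>", the single fragment "Hello <eos> world" is partitioned into RAW_TEXT("Hello "), TOKEN(<eos>), RAW_TEXT(" world"); if "<eos>" carried LLAMA_TOKEN_ATTR_LSTRIP, the trailing whitespace of the left fragment would be stripped as well. Each surviving raw fragment is then tokenized independently by the vocab-specific sessions in llama_tokenize_internal() below.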
+
1437
+ std::vector<llama_vocab::id> llama_tokenize_internal(
1438
+ const llama_vocab & vocab,
1439
+ std::string raw_text,
1440
+ bool add_special,
1441
+ bool parse_special) {
1442
+ LM_GGML_ASSERT(vocab.tokenizer && "Tokenizer not initialized. Call llama_vocab::init_tokenizer() first.");
1443
+
1444
+ std::vector<llama_vocab::id> output;
1445
+ std::forward_list<fragment_buffer_variant> fragment_buffer;
1446
+
1447
+ if (!raw_text.empty()) {
1448
+ fragment_buffer.emplace_front(raw_text, 0, raw_text.length());
1449
+ tokenizer_st_partition(vocab, fragment_buffer, parse_special);
1450
+ }
1451
+
1452
+ switch (vocab.type) {
1453
+ case LLAMA_VOCAB_TYPE_SPM:
1454
+ {
1455
+ // OG tokenizer behavior:
1456
+ //
1457
+ // tokenizer.encode('', add_special_tokens=True) returns [1]
1458
+ // tokenizer.encode('', add_special_tokens=False) returns []
1459
+
1460
+ bool is_prev_special = true; // prefix with space if first token
1461
+
1462
+ if (add_special && vocab.tokenizer_add_bos) {
1463
+ LM_GGML_ASSERT(vocab.special_bos_id != -1);
1464
+ output.push_back(vocab.special_bos_id);
1465
+ is_prev_special = true;
1466
+ }
1467
+
1468
+ for (const auto & fragment : fragment_buffer) {
1469
+ if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
1470
+ auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
1471
+
1472
+ // prefix with space if previous is special
1473
+ if (vocab.tokenizer_add_space_prefix && is_prev_special) {
1474
+ raw_text = " " + raw_text;
1475
+ }
1476
+
1477
+ #ifdef PRETOKENIZERDEBUG
1478
+ LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
1479
+ #endif
1480
+ llama_escape_whitespace(raw_text);
1481
+ llm_tokenizer_spm_session session(vocab);
1482
+ session.tokenize(raw_text, output);
1483
+ is_prev_special = false;
1484
+ } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
1485
+ output.push_back(fragment.token);
1486
+ is_prev_special = true;
1487
+ }
1488
+ }
1489
+
1490
+ if (add_special && vocab.tokenizer_add_bos && output.size() >= 2 && output[1] == vocab.special_bos_id) {
1491
+ LLAMA_LOG_WARN(
1492
+ "%s: Added a BOS token to the prompt as specified by the model but the prompt "
1493
+ "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
1494
+ "Are you sure this is what you want?\n", __FUNCTION__);
1495
+ }
1496
+
1497
+ if (add_special && vocab.tokenizer_add_eos) {
1498
+ LM_GGML_ASSERT(vocab.special_eos_id != -1);
1499
+ output.push_back(vocab.special_eos_id);
1500
+ }
1501
+ } break;
1502
+ case LLAMA_VOCAB_TYPE_BPE:
1503
+ {
1504
+ llm_tokenizer_bpe_session session(vocab);
1505
+ // the session calls BPE-specific methods that do not exist in the base llm_tokenizer,
1506
+ // so it simply casts the shared tokenizer to the BPE tokenizer object
1507
+ if (add_special) {
1508
+ session.append_bos(output);
1509
+ }
1510
+ for (const auto & fragment : fragment_buffer) {
1511
+ if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
1512
+ auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
1513
+
1514
+ #ifdef PRETOKENIZERDEBUG
1515
+ LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
1516
+ #endif
1517
+ session.tokenize(raw_text, output);
1518
+ } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
1519
+ session.append(fragment.token, output);
1520
+ }
1521
+ }
1522
+
1523
+ if (add_special) {
1524
+ session.append_eos(output);
1525
+ session.check_double_bos_eos(output);
1526
+ }
1527
+ } break;
1528
+ case LLAMA_VOCAB_TYPE_WPM:
1529
+ {
1530
+ if (add_special) {
1531
+ LM_GGML_ASSERT(vocab.special_cls_id != -1);
1532
+ output.push_back(vocab.special_cls_id);
1533
+ }
1534
+
1535
+ llm_tokenizer_wpm_session session(vocab);
1536
+
1537
+ for (const auto & fragment : fragment_buffer) {
1538
+ if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
1539
+ auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
1540
+
1541
+ #ifdef PRETOKENIZERDEBUG
1542
+ LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
1543
+ #endif
1544
+ session.tokenize(raw_text, output);
1545
+ } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
1546
+ output.push_back(fragment.token);
1547
+ }
1548
+ }
1549
+
1550
+ if (add_special) {
1551
+ LM_GGML_ASSERT(vocab.special_sep_id != -1);
1552
+ output.push_back(vocab.special_sep_id);
1553
+ }
1554
+ } break;
1555
+ case LLAMA_VOCAB_TYPE_UGM:
1556
+ {
1557
+ if (add_special && vocab.tokenizer_add_bos) {
1558
+ LM_GGML_ASSERT(vocab.special_bos_id != -1);
1559
+ output.push_back(vocab.special_bos_id);
1560
+ }
1561
+ llm_tokenizer_ugm_session session(vocab);
1562
+
1563
+ for (const auto & fragment : fragment_buffer) {
1564
+ if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
1565
+ auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
1566
+ #ifdef PRETOKENIZERDEBUG
1567
+ LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
1568
+ #endif
1569
+ session.tokenize(raw_text, output);
1570
+ } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
1571
+ output.push_back(fragment.token);
1572
+ }
1573
+ }
1574
+
1575
+ if (add_special && vocab.tokenizer_add_bos && output.size() >= 2 && output[1] == vocab.special_bos_id) {
1576
+ LLAMA_LOG_WARN(
1577
+ "%s: Added a BOS token to the prompt as specified by the model but the prompt "
1578
+ "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
1579
+ "Are you sure this is what you want?\n", __FUNCTION__);
1580
+ }
1581
+
1582
+ if (add_special && vocab.tokenizer_add_eos) {
1583
+ LM_GGML_ASSERT(vocab.special_eos_id != -1);
1584
+ output.push_back(vocab.special_eos_id);
1585
+ }
1586
+ } break;
1587
+ case LLAMA_VOCAB_TYPE_RWKV:
1588
+ {
1589
+ llm_tokenizer_rwkv_session session(vocab);
1590
+ for (const auto & fragment : fragment_buffer) {
1591
+ if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
1592
+ auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
1593
+
1594
+ #ifdef PRETOKENIZERDEBUG
1595
+ LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
1596
+ #endif
1597
+
1598
+ session.tokenize(raw_text, output);
1599
+ } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
1600
+ output.push_back(fragment.token);
1601
+ }
1602
+ }
1603
+ } break;
1604
+ case LLAMA_VOCAB_TYPE_NONE:
1605
+ LM_GGML_ABORT("fatal error");
1606
+ }
1607
+
1608
+ return output;
1609
+ }
1610
+
1611
+ llama_token llama_byte_to_token_impl(const llama_vocab & vocab, uint8_t ch) {
1612
+ LM_GGML_ASSERT(llama_vocab_get_type(vocab) != LLAMA_VOCAB_TYPE_NONE);
1613
+ static const char * hex = "0123456789ABCDEF";
1614
+ switch (llama_vocab_get_type(vocab)) {
1615
+ case LLAMA_VOCAB_TYPE_SPM:
1616
+ case LLAMA_VOCAB_TYPE_UGM: {
1617
+ const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
1618
+ auto token = vocab.token_to_id.find(buf);
1619
+ if (token != vocab.token_to_id.end()) {
1620
+ return (*token).second;
1621
+ }
1622
+ // Try to fall back to just the byte as a string
1623
+ const char buf2[2] = { (char)ch, 0 };
1624
+ return vocab.token_to_id.at(buf2);
1625
+ }
1626
+ case LLAMA_VOCAB_TYPE_WPM:
1627
+ case LLAMA_VOCAB_TYPE_BPE: {
1628
+ return vocab.token_to_id.at(unicode_byte_to_utf8(ch));
1629
+ }
1630
+ default:
1631
+ LM_GGML_ABORT("fatal error");
1632
+ }
1633
+ }
1634
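For example, with an SPM vocab the byte 0x41 first resolves to the byte-fallback token literal "<0x41>"; only when the vocab lacks such byte tokens does the lookup fall back to the single-character string "A" (and token_to_id.at() throws if neither exists).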
+
1635
+ const char * llama_token_get_text_impl(const struct llama_vocab & vocab, llama_token token) {
1636
+ LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
1637
+ return vocab.id_to_token[token].text.c_str();
1638
+ }
1639
+
1640
+ float llama_token_get_score_impl(const struct llama_vocab & vocab, llama_token token) {
1641
+ LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
1642
+ return vocab.id_to_token[token].score;
1643
+ }
1644
+
1645
+ llama_token_attr llama_token_get_attr_impl(const struct llama_vocab & vocab, llama_token token) {
1646
+ LM_GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
1647
+ return vocab.id_to_token[token].attr;
1648
+ }
1649
+
1650
+ bool llama_token_is_eog_impl(const struct llama_vocab & vocab, llama_token token) {
1651
+ return token != -1 && vocab.special_eog_ids.count(token) > 0;
1652
+ }
1653
+
1654
+ bool llama_token_is_control_impl(const struct llama_vocab & vocab, llama_token token) {
1655
+ return llama_is_control_token(vocab, token);
1656
+ }
1657
+
1658
+ llama_token llama_token_bos_impl(const struct llama_vocab & vocab) {
1659
+ return vocab.special_bos_id;
1660
+ }
1661
+
1662
+ llama_token llama_token_eos_impl(const struct llama_vocab & vocab) {
1663
+ return vocab.special_eos_id;
1664
+ }
1665
+
1666
+ llama_token llama_token_eot_impl(const struct llama_vocab & vocab) {
1667
+ return vocab.special_eot_id;
1668
+ }
1669
+
1670
+ llama_token llama_token_eom_impl(const struct llama_vocab & vocab) {
1671
+ return vocab.special_eom_id;
1672
+ }
1673
+
1674
+ llama_token llama_token_cls_impl(const struct llama_vocab & vocab) {
1675
+ return vocab.special_cls_id;
1676
+ }
1677
+
1678
+ llama_token llama_token_sep_impl(const struct llama_vocab & vocab) {
1679
+ return vocab.special_sep_id;
1680
+ }
1681
+
1682
+ llama_token llama_token_nl_impl(const struct llama_vocab & vocab) {
1683
+ return vocab.linefeed_id;
1684
+ }
1685
+
1686
+ llama_token llama_token_pad_impl(const struct llama_vocab & vocab) {
1687
+ return vocab.special_pad_id;
1688
+ }
1689
+
1690
+ bool llama_add_bos_token_impl(const struct llama_vocab & vocab) {
1691
+ return vocab.tokenizer_add_bos;
1692
+ }
1693
+
1694
+ bool llama_add_eos_token_impl(const struct llama_vocab & vocab) {
1695
+ return vocab.tokenizer_add_eos;
1696
+ }
1697
+
1698
+ llama_token llama_token_prefix_impl(const struct llama_vocab & vocab) {
1699
+ return vocab.special_fim_pre_id;
1700
+ }
1701
+
1702
+ llama_token llama_token_middle_impl(const struct llama_vocab & vocab) {
1703
+ return vocab.special_fim_mid_id;
1704
+ }
1705
+
1706
+ llama_token llama_token_suffix_impl(const struct llama_vocab & vocab) {
1707
+ return vocab.special_fim_suf_id;
1708
+ }
1709
+
1710
+ llama_token llama_token_fim_pre_impl(const struct llama_vocab & vocab) {
1711
+ return vocab.special_fim_pre_id;
1712
+ }
1713
+
1714
+ llama_token llama_token_fim_suf_impl(const struct llama_vocab & vocab) {
1715
+ return vocab.special_fim_suf_id;
1716
+ }
1717
+
1718
+ llama_token llama_token_fim_mid_impl(const struct llama_vocab & vocab) {
1719
+ return vocab.special_fim_mid_id;
1720
+ }
1721
+
1722
+ llama_token llama_token_fim_pad_impl(const struct llama_vocab & vocab) {
1723
+ return vocab.special_fim_pad_id;
1724
+ }
1725
+
1726
+ llama_token llama_token_fim_rep_impl(const struct llama_vocab & vocab) {
1727
+ return vocab.special_fim_rep_id;
1728
+ }
1729
+
1730
+ llama_token llama_token_fim_sep_impl(const struct llama_vocab & vocab) {
1731
+ return vocab.special_fim_sep_id;
1732
+ }
1733
+
1734
+ int32_t llama_tokenize_impl(
1735
+ const struct llama_vocab & vocab,
1736
+ const char * text,
1737
+ int32_t text_len,
1738
+ llama_token * tokens,
1739
+ int32_t n_tokens_max,
1740
+ bool add_special,
1741
+ bool parse_special) {
1742
+ auto res = llama_tokenize_internal(vocab, std::string(text, text_len), add_special, parse_special);
1743
+ if (n_tokens_max < (int) res.size()) {
1744
+ // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
1745
+ return -((int) res.size());
1746
+ }
1747
+
1748
+ for (size_t i = 0; i < res.size(); i++) {
1749
+ tokens[i] = res[i];
1750
+ }
1751
+
1752
+ return res.size();
1753
+ }
1754
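A negative return value is the required token count, which enables the usual probe-resize-retry pattern. An illustrative wrapper (tokenize_all is a hypothetical helper, not part of the package):

    #include <string>
    #include <vector>

    static std::vector<llama_token> tokenize_all(const llama_vocab & vocab,
                                                 const std::string & text) {
        std::vector<llama_token> toks(text.size() + 2);  // rough initial guess
        int32_t n = llama_tokenize_impl(vocab, text.c_str(), (int32_t) text.size(),
                                        toks.data(), (int32_t) toks.size(),
                                        /*add_special=*/true, /*parse_special=*/false);
        if (n < 0) {          // buffer too small: -n is the exact size needed
            toks.resize(-n);
            n = llama_tokenize_impl(vocab, text.c_str(), (int32_t) text.size(),
                                    toks.data(), (int32_t) toks.size(), true, false);
        }
        toks.resize(n);
        return toks;
    }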
+
1755
+ static std::string llama_decode_text(const std::string & text) {
1756
+ std::string decoded_text;
1757
+
1758
+ const auto cpts = unicode_cpts_from_utf8(text);
1759
+ for (const auto cpt : cpts) {
1760
+ const auto utf8 = unicode_cpt_to_utf8(cpt);
1761
+ try {
1762
+ decoded_text += unicode_utf8_to_byte(utf8);
1763
+ } catch (const std::out_of_range & /*e*/) {
1764
+ decoded_text += "[UNK_BYTE_0x";
1765
+ for (const auto c : utf8) {
1766
+ decoded_text += format("%02x", (uint8_t) c);
1767
+ }
1768
+ decoded_text += text + "]";
1769
+ }
1770
+ }
1771
+
1772
+ return decoded_text;
1773
+ }
1774
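This reverses the GPT-2-style byte-to-unicode mapping used by BPE token texts: for example the codepoint U+0120 ("Ġ"), which BPE vocabs use to stand for a leading space, decodes back to the byte 0x20, while codepoints outside the mapping are rendered as a visible "[UNK_BYTE_0x..]" marker rather than dropped.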
+
1775
+ // does not write null-terminator to buf
1776
+ int32_t llama_token_to_piece_impl(const struct llama_vocab & vocab, llama_token token, char * buf, int32_t length, int32_t lstrip, bool special) {
1777
+ // ref: https://github.com/ggerganov/llama.cpp/pull/7587#discussion_r1620983843
1778
+ static const int attr_special = LLAMA_TOKEN_ATTR_UNKNOWN | LLAMA_TOKEN_ATTR_CONTROL;
1779
+ const llama_token_attr attr = llama_token_get_attr_impl(vocab, token);
1780
+ if (!special && (attr & attr_special)) {
1781
+ return 0;
1782
+ }
1783
+
1784
+ // copy piece chars to output text buffer
1785
+ // skip up to 'lstrip' leading spaces before copying
1786
+ auto _try_copy = [=] (const char * token, size_t size) -> int32_t {
1787
+ for (int32_t i = 0; i < lstrip && size && *token == ' '; ++i) {
1788
+ token++;
1789
+ size--;
1790
+ }
1791
+ if (length < (int32_t)size) {
1792
+ return -(int32_t) size;
1793
+ }
1794
+ memcpy(buf, token, size);
1795
+ return (int32_t) size;
1796
+ };
1797
+
1798
+ // if we have a cache - use it
1799
+ {
1800
+ const auto & cache = vocab.cache_token_to_piece;
1801
+
1802
+ if (!cache.empty()) {
1803
+ const auto & result = cache.at(token);
1804
+ return _try_copy(result.data(), result.size());
1805
+ }
1806
+ }
1807
+
1808
+ if (0 <= token && token < (int32_t) vocab.id_to_token.size()) {
1809
+ const std::string & token_text = vocab.id_to_token[token].text;
1810
+ switch (llama_vocab_get_type(vocab)) {
1811
+ case LLAMA_VOCAB_TYPE_WPM:
1812
+ case LLAMA_VOCAB_TYPE_SPM:
1813
+ case LLAMA_VOCAB_TYPE_UGM: {
1814
+ // NOTE: we accept all unsupported token types,
1815
+ // suppressing them like CONTROL tokens.
1816
+ if (attr & (attr_special | LLAMA_TOKEN_ATTR_USER_DEFINED)) {
1817
+ return _try_copy(token_text.data(), token_text.size());
1818
+ }
1819
+ if (attr & LLAMA_TOKEN_ATTR_NORMAL) {
1820
+ std::string result = token_text;
1821
+ llama_unescape_whitespace(result);
1822
+ return _try_copy(result.data(), result.size());
1823
+ }
1824
+ if (attr & LLAMA_TOKEN_ATTR_BYTE) {
1825
+ char byte = (char) llama_token_to_byte(vocab, token);
1826
+ return _try_copy((char*) &byte, 1);
1827
+ }
1828
+ break;
1829
+ }
1830
+ case LLAMA_VOCAB_TYPE_BPE: {
1831
+ // NOTE: we accept all unsupported token types,
1832
+ // suppressing them like CONTROL tokens.
1833
+ if (attr & (attr_special | LLAMA_TOKEN_ATTR_USER_DEFINED)) {
1834
+ return _try_copy(token_text.data(), token_text.size());
1835
+ }
1836
+ if (attr & LLAMA_TOKEN_ATTR_NORMAL) {
1837
+ std::string result = llama_decode_text(token_text);
1838
+ return _try_copy(result.data(), result.size());
1839
+ }
1840
+ break;
1841
+ }
1842
+ case LLAMA_VOCAB_TYPE_RWKV: {
1843
+ std::vector<uint8_t> result = llama_unescape_rwkv_token(token_text);
1844
+
1845
+ // If we don't have enough space, return an error
1846
+ if (result.size() > (size_t)length) {
1847
+ return -(int)result.size();
1848
+ }
1849
+
1850
+ memcpy(buf, result.data(), result.size());
1851
+ return (int)result.size();
1852
+ }
1853
+ default:
1854
+ LM_GGML_ABORT("fatal error");
1855
+ }
1856
+ }
1857
+
1858
+ return 0;
1859
+ }
1860
+
1861
+ int32_t llama_detokenize_impl(
1862
+ const struct llama_vocab & vocab,
1863
+ const llama_token * tokens,
1864
+ int32_t n_tokens,
1865
+ char * text,
1866
+ int32_t text_len_max,
1867
+ bool remove_special,
1868
+ bool unparse_special) {
1869
+ LM_GGML_ASSERT(vocab.tokenizer && "Tokenizer not initialized. Call llama_vocab::init_tokenizer() first.");
1870
+
1871
+ int32_t avail = text_len_max;
1872
+ int32_t total = 0;
1873
+
1874
+ // remove the leading space
1875
+ bool remove_space = vocab.tokenizer_add_space_prefix;
1876
+
1877
+ if (remove_special && vocab.tokenizer_add_bos) {
1878
+ if (n_tokens > 0 && tokens[0] == vocab.special_bos_id) {
1879
+ remove_space = false;
1880
+ n_tokens--;
1881
+ tokens++;
1882
+ }
1883
+ }
1884
+
1885
+ if (remove_special && vocab.tokenizer_add_eos) {
1886
+ if (n_tokens > 0 && tokens[n_tokens-1] == vocab.special_eos_id) {
1887
+ n_tokens--;
1888
+ }
1889
+ }
1890
+
1891
+ for (int32_t i = 0; i < n_tokens; ++i) {
1892
+ LM_GGML_ASSERT(avail >= 0);
1893
+ int32_t n_chars = llama_token_to_piece_impl(vocab, tokens[i], text, avail, remove_space, unparse_special);
1894
+ remove_space = false;
1895
+ if (n_chars < 0) {
1896
+ avail = 0;
1897
+ total -= n_chars;
1898
+ } else if (n_chars > 0) {
1899
+ avail -= n_chars;
1900
+ text += n_chars;
1901
+ total += n_chars;
1902
+ }
1903
+ }
1904
+
1905
+ if (total > text_len_max) {
1906
+ return -total;
1907
+ }
1908
+
1909
+ if (vocab.tokenizer_clean_spaces) {
1910
+ text -= total; // restart text
1911
+
1912
+ // first pass: remove the space before the characters ?!., //TODO: where do these characters come from?
1913
+ const int32_t total1 = total;
1914
+ total = total ? 1 : 0;
1915
+ for (int32_t i = 1; i < total1; ++i) {
1916
+ const char x = text[i];
1917
+ if (text[i - 1] == ' ') {
1918
+ if (x == '?' || x == '!' || x == '.' || x == ',') { // " ?", " !", " .", " ,"
1919
+ total--; // remove space
1920
+ }
1921
+ }
1922
+ text[total++] = x;
1923
+ }
1924
+
1925
+ // second pass: strip single apostrophe between spaces
1926
+ const int32_t total2 = total;
1927
+ total = total ? 1 : 0;
1928
+ for (int32_t i = 1; i < total2; ++i) {
1929
+ const char x = text[i];
1930
+ if (x == '\'' && i + 1 < total2 && text[i - 1] == ' ' && text[i + 1] == ' ') { // " ' "
1931
+ total--; // remove prev space
1932
+ text[++i] = '\0'; // remove next space
1933
+ }
1934
+ text[total++] = x;
1935
+ }
1936
+
1937
+ // third pass: apostrophe contractions //NOTE: does this make sense?
1938
+ const int32_t total3 = total;
1939
+ total = total ? 1 : 0;
1940
+ for (int32_t i = 1; i < total3; ++i) {
1941
+ const char x = text[i];
1942
+ if (text[i - 1] == ' ') {
1943
+ if (x == '\'' && i + 1 < total3) {
1944
+ const char x1 = text[i + 1];
1945
+ if (x1 == 't' || x1 == 'd') { // " 't", " 'd"
1946
+ //total--; // remove space
1947
+ } else if (x1 == 's' || x1 == 'm') { // " 's", " 'm"
1948
+ total--; // remove space
1949
+ } else if (i + 2 < total3) {
1950
+ const char x2 = text[i + 2];
1951
+ if ((x1 == 'l' && x2 == 'l')) { // " 'll"
1952
+ //total--; // remove space
1953
+ } else if ((x1 == 'r' && x2 == 'e') || (x1 == 'v' && x2 == 'e')) { // " 're", " 've"
1954
+ total--; // remove space
1955
+ } else {
1956
+ //total--; // remove space
1957
+ }
1958
+ } else {
1959
+ //total--; // remove space
1960
+ }
1961
+ }
1962
+ }
1963
+ text[total++] = x;
1964
+ }
1965
+ }
1966
+
1967
+ return total <= text_len_max ? total : -total;
1968
+ }
1969
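As a worked example of the cleanup passes: the raw per-token concatenation "Hello , it 's fine" becomes "Hello, it 's fine" after the first pass (space before ',' removed) and "Hello, it's fine" after the third (space before "'s" removed); contractions such as "'t", "'d" and "'ll" are deliberately left untouched.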
+
1970
+ std::string llama_detokenize(const struct llama_vocab & vocab, const std::vector<llama_token> & tokens, bool special) {
1971
+ std::string text;
1972
+ text.resize(std::max(text.capacity(), tokens.size()));
1973
+ int32_t n_chars = llama_detokenize_impl(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
1974
+ if (n_chars < 0) {
1975
+ text.resize(-n_chars);
1976
+ n_chars = llama_detokenize_impl(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
1977
+ LM_GGML_ASSERT(n_chars <= (int32_t)text.size()); // whitespace trimming is performed after per-token detokenization
1978
+ }
1979
+
1980
+ text.resize(n_chars);
1981
+
1982
+ // NOTE: the original tokenizer decodes bytes after collecting the pieces.
1983
+ return text;
1984
+ }