cui-llama.rn 1.3.5 → 1.3.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/android/src/main/CMakeLists.txt +14 -8
  2. package/android/src/main/jni.cpp +38 -37
  3. package/cpp/common.cpp +43 -26
  4. package/cpp/common.h +18 -11
  5. package/cpp/ggml-backend-reg.cpp +5 -0
  6. package/cpp/ggml-backend.cpp +5 -2
  7. package/cpp/ggml-cpp.h +1 -0
  8. package/cpp/ggml-cpu-aarch64.cpp +6 -1
  9. package/cpp/ggml-cpu-quants.c +5 -1
  10. package/cpp/ggml-impl.h +11 -16
  11. package/cpp/ggml-metal.m +2 -2
  12. package/cpp/ggml.c +0 -1276
  13. package/cpp/ggml.h +0 -140
  14. package/cpp/gguf.cpp +1325 -0
  15. package/cpp/gguf.h +202 -0
  16. package/cpp/llama-adapter.cpp +346 -0
  17. package/cpp/llama-adapter.h +73 -0
  18. package/cpp/llama-arch.cpp +1434 -0
  19. package/cpp/llama-arch.h +395 -0
  20. package/cpp/llama-batch.cpp +368 -0
  21. package/cpp/llama-batch.h +88 -0
  22. package/cpp/llama-chat.cpp +567 -0
  23. package/cpp/llama-chat.h +51 -0
  24. package/cpp/llama-context.cpp +1771 -0
  25. package/cpp/llama-context.h +128 -0
  26. package/cpp/llama-cparams.cpp +1 -0
  27. package/cpp/llama-cparams.h +37 -0
  28. package/cpp/llama-cpp.h +30 -0
  29. package/cpp/llama-grammar.cpp +1 -0
  30. package/cpp/llama-grammar.h +3 -1
  31. package/cpp/llama-hparams.cpp +71 -0
  32. package/cpp/llama-hparams.h +140 -0
  33. package/cpp/llama-impl.cpp +167 -0
  34. package/cpp/llama-impl.h +16 -136
  35. package/cpp/llama-kv-cache.cpp +718 -0
  36. package/cpp/llama-kv-cache.h +218 -0
  37. package/cpp/llama-mmap.cpp +589 -0
  38. package/cpp/llama-mmap.h +67 -0
  39. package/cpp/llama-model-loader.cpp +1011 -0
  40. package/cpp/llama-model-loader.h +158 -0
  41. package/cpp/llama-model.cpp +2202 -0
  42. package/cpp/llama-model.h +391 -0
  43. package/cpp/llama-sampling.cpp +117 -4
  44. package/cpp/llama-vocab.cpp +21 -28
  45. package/cpp/llama-vocab.h +13 -1
  46. package/cpp/llama.cpp +8437 -19421
  47. package/cpp/llama.cpp.rej +23 -0
  48. package/cpp/llama.h +31 -6
  49. package/cpp/rn-llama.hpp +39 -37
  50. package/cpp/sgemm.cpp +776 -70
  51. package/cpp/unicode.cpp +6 -0
  52. package/package.json +1 -1
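The bulk of this release tracks upstream llama.cpp's refactor: the monolithic package/cpp/llama.cpp (-19,421 lines) is split into focused modules (llama-model, llama-vocab, llama-kv-cache, llama-mmap, ...), and the GGUF reader moves out of ggml.c into the new gguf.cpp/gguf.h. As rough orientation, a minimal consumer of the relocated GGUF key/value API might look like the sketch below. It uses only lm_gguf_* calls that also appear in the llama-model.cpp hunk that follows, plus the lm_-prefixed equivalents of the upstream gguf_init_from_file/gguf_free entry points (assumed here); "model.gguf" is a placeholder file name:

    #include "gguf.h" // new standalone module in this release (files 14-15)
    #include <cstdio>

    // Sketch: list all scalar metadata keys of a GGUF file, mirroring the
    // metadata loop in llm_load_hparams() below.
    int main() {
        struct lm_gguf_init_params params = { /*.no_alloc =*/ true, /*.ctx =*/ nullptr };
        struct lm_gguf_context * ctx = lm_gguf_init_from_file("model.gguf", params);
        if (!ctx) {
            return 1;
        }
        for (int i = 0; i < lm_gguf_get_n_kv(ctx); i++) {
            if (lm_gguf_get_kv_type(ctx, i) == LM_GGUF_TYPE_ARRAY) {
                continue; // arrays are skipped, as in llm_load_hparams()
            }
            printf("%s\n", lm_gguf_get_key(ctx, i));
        }
        lm_gguf_free(ctx);
        return 0;
    }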
package/cpp/llama-model.cpp (new file)
@@ -0,0 +1,2202 @@
+ #include "llama-model.h"
+
+ #include "llama-impl.h"
+ #include "llama-model-loader.h"
+
+ #include "unicode.h" // TODO: remove
+
+ #include <algorithm>
+ #include <cassert>
+ #include <functional>
+ #include <sstream>
+ #include <stdexcept>
+
+ static const size_t kiB = 1024;
+ static const size_t MiB = 1024*kiB;
+ static const size_t GiB = 1024*MiB;
+
+ const char * llm_type_name(llm_type type) {
+     switch (type) {
+         case MODEL_14M: return "14M";
+         case MODEL_17M: return "17M";
+         case MODEL_22M: return "22M";
+         case MODEL_33M: return "33M";
+         case MODEL_60M: return "60M";
+         case MODEL_70M: return "70M";
+         case MODEL_80M: return "80M";
+         case MODEL_109M: return "109M";
+         case MODEL_137M: return "137M";
+         case MODEL_160M: return "160M";
+         case MODEL_220M: return "220M";
+         case MODEL_250M: return "250M";
+         case MODEL_270M: return "270M";
+         case MODEL_335M: return "335M";
+         case MODEL_410M: return "410M";
+         case MODEL_450M: return "450M";
+         case MODEL_770M: return "770M";
+         case MODEL_780M: return "780M";
+         case MODEL_0_5B: return "0.5B";
+         case MODEL_1B: return "1B";
+         case MODEL_1_3B: return "1.3B";
+         case MODEL_1_4B: return "1.4B";
+         case MODEL_1_5B: return "1.5B";
+         case MODEL_1_6B: return "1.6B";
+         case MODEL_2B: return "2B";
+         case MODEL_2_8B: return "2.8B";
+         case MODEL_3B: return "3B";
+         case MODEL_4B: return "4B";
+         case MODEL_6B: return "6B";
+         case MODEL_6_9B: return "6.9B";
+         case MODEL_7B: return "7B";
+         case MODEL_8B: return "8B";
+         case MODEL_9B: return "9B";
+         case MODEL_11B: return "11B";
+         case MODEL_12B: return "12B";
+         case MODEL_13B: return "13B";
+         case MODEL_14B: return "14B";
+         case MODEL_15B: return "15B";
+         case MODEL_16B: return "16B";
+         case MODEL_20B: return "20B";
+         case MODEL_30B: return "30B";
+         case MODEL_32B: return "32B";
+         case MODEL_34B: return "34B";
+         case MODEL_35B: return "35B";
+         case MODEL_40B: return "40B";
+         case MODEL_65B: return "65B";
+         case MODEL_70B: return "70B";
+         case MODEL_236B: return "236B";
+         case MODEL_314B: return "314B";
+         case MODEL_671B: return "671B";
+         case MODEL_SMALL: return "0.1B";
+         case MODEL_MEDIUM: return "0.4B";
+         case MODEL_LARGE: return "0.8B";
+         case MODEL_XL: return "1.5B";
+         case MODEL_A1_7B: return "A1.7B";
+         case MODEL_A2_7B: return "A2.7B";
+         case MODEL_8x7B: return "8x7B";
+         case MODEL_8x22B: return "8x22B";
+         case MODEL_16x12B: return "16x12B";
+         case MODEL_10B_128x3_66B: return "10B+128x3.66B";
+         case MODEL_57B_A14B: return "57B.A14B";
+         case MODEL_27B: return "27B";
+         default: return "?B";
+     }
+ }
+
+ static std::string llama_model_ftype_name(llama_ftype ftype) {
+     if (ftype & LLAMA_FTYPE_GUESSED) {
+         return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
+     }
+
+     switch (ftype) {
+         case LLAMA_FTYPE_ALL_F32: return "all F32";
+         case LLAMA_FTYPE_MOSTLY_F16: return "F16";
+         case LLAMA_FTYPE_MOSTLY_BF16: return "BF16";
+         case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0";
+         case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1";
+         case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0";
+         case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1";
+         case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0";
+         case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K - Medium";
+         case LLAMA_FTYPE_MOSTLY_Q2_K_S: return "Q2_K - Small";
+         case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "Q3_K - Small";
+         case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "Q3_K - Medium";
+         case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "Q3_K - Large";
+         case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "Q4_K - Small";
+         case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "Q4_K - Medium";
+         case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "Q5_K - Small";
+         case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "Q5_K - Medium";
+         case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K";
+         case LLAMA_FTYPE_MOSTLY_TQ1_0: return "TQ1_0 - 1.69 bpw ternary";
+         case LLAMA_FTYPE_MOSTLY_TQ2_0: return "TQ2_0 - 2.06 bpw ternary";
+         case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return "IQ2_XXS - 2.0625 bpw";
+         case LLAMA_FTYPE_MOSTLY_IQ2_XS: return "IQ2_XS - 2.3125 bpw";
+         case LLAMA_FTYPE_MOSTLY_IQ2_S: return "IQ2_S - 2.5 bpw";
+         case LLAMA_FTYPE_MOSTLY_IQ2_M: return "IQ2_M - 2.7 bpw";
+         case LLAMA_FTYPE_MOSTLY_IQ3_XS: return "IQ3_XS - 3.3 bpw";
+         case LLAMA_FTYPE_MOSTLY_IQ3_XXS: return "IQ3_XXS - 3.0625 bpw";
+         case LLAMA_FTYPE_MOSTLY_IQ1_S: return "IQ1_S - 1.5625 bpw";
+         case LLAMA_FTYPE_MOSTLY_IQ1_M: return "IQ1_M - 1.75 bpw";
+         case LLAMA_FTYPE_MOSTLY_IQ4_NL: return "IQ4_NL - 4.5 bpw";
+         case LLAMA_FTYPE_MOSTLY_IQ4_XS: return "IQ4_XS - 4.25 bpw";
+         case LLAMA_FTYPE_MOSTLY_IQ3_S: return "IQ3_S - 3.4375 bpw";
+         case LLAMA_FTYPE_MOSTLY_IQ3_M: return "IQ3_S mix - 3.66 bpw";
+
+         default: return "unknown, may not work";
+     }
+ }
+
+ static const char * llama_expert_gating_func_name(llama_expert_gating_func_type type) {
+     switch (type) {
+         case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX: return "softmax";
+         case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID: return "sigmoid";
+         default: return "unknown";
+     }
+ }
+
+ std::string llama_model_arch_name (const llama_model & model) {
+     return llm_arch_name(model.arch);
+ }
+
+ std::string llama_model_type_name (const llama_model & model) {
+     return llm_type_name(model.type);
+ }
+
+ std::string llama_model_ftype_name(const llama_model & model) {
+     return llama_model_ftype_name(model.ftype);
+ }
+
+ template<typename F>
+ static bool buft_supported(lm_ggml_backend_buffer_type_t buft, lm_ggml_backend_dev_t dev, F & fn) {
+     lm_ggml_init_params params = {
+         /*.mem_size =*/ lm_ggml_tensor_overhead()*8,
+         /*.mem_buffer =*/ NULL,
+         /*.no_alloc =*/ true,
+     };
+
+     lm_ggml_context_ptr ctx { lm_ggml_init(params) };
+     if (!ctx) {
+         throw std::runtime_error(format("failed to create ggml context"));
+     }
+
+     lm_ggml_backend_buffer_ptr buf { lm_ggml_backend_buft_alloc_buffer(buft, 0) };
+     lm_ggml_tensor * op_tensor = fn(ctx.get());
+     for (int i = 0; i < LM_GGML_MAX_SRC; i++) {
+         if (op_tensor->src[i] != nullptr) {
+             assert(op_tensor->src[i]->buffer == nullptr);
+             op_tensor->src[i]->buffer = buf.get();
+         }
+     }
+
+     bool op_supported = lm_ggml_backend_dev_supports_op(dev, op_tensor);
+
+     return op_supported;
+ }
+
+ template<typename F>
+ static lm_ggml_backend_buffer_type_t select_buft(const llama_model::buft_list_t & buft_list, const F & fn) {
+     for (const auto & cur : buft_list) {
+         lm_ggml_backend_dev_t cur_dev = cur.first;
+         lm_ggml_backend_buffer_type_t cur_buft = cur.second;
+         if (buft_supported(cur_buft, cur_dev, fn)) {
+             return cur_buft;
+         }
+     }
+
+     throw std::runtime_error(format("no suitable buffer type found"));
+ }
+
+ lm_ggml_backend_buffer_type_t llama_model_select_buft(const llama_model & model, int il) {
+     return select_buft(
+         *model.dev_layer.at(il).buft_list,
+         [&](lm_ggml_context * ctx) {
+             lm_ggml_tensor * cur = lm_ggml_new_tensor_1d(ctx, LM_GGML_TYPE_F32, model.hparams.n_embd);
+             lm_ggml_tensor * layer_dir = lm_ggml_new_tensor_1d(ctx, LM_GGML_TYPE_F32, model.hparams.n_embd);
+             return lm_ggml_add(ctx, cur, layer_dir);
+         });
+ }
+
+ struct lm_ggml_tensor * llama_model_get_tensor(const struct llama_model & model, const char * name) {
+     auto it = std::find_if(model.tensors_by_name.begin(), model.tensors_by_name.end(),
+             [name](const std::pair<std::string, struct lm_ggml_tensor *> & it) {
+                 return it.first == name;
+             });
+     if (it == model.tensors_by_name.end()) {
+         return nullptr;
+     }
+
+     return it->second;
+ }
+
+ size_t llama_model_max_nodes(const llama_model & model) {
+     return std::max<size_t>(8192, model.tensors_by_name.size()*5);
+ }
+
+ static const std::map<llama_rope_scaling_type, const char *> LLAMA_ROPE_SCALING_TYPES = {
+     { LLAMA_ROPE_SCALING_TYPE_NONE, "none" },
+     { LLAMA_ROPE_SCALING_TYPE_LINEAR, "linear" },
+     { LLAMA_ROPE_SCALING_TYPE_YARN, "yarn" },
+     { LLAMA_ROPE_SCALING_TYPE_LONGROPE, "longrope" },
+ };
+
+ static llama_rope_scaling_type llama_rope_scaling_type_from_string(const std::string & name) {
+     for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
+         if (kv.second == name) {
+             return (llama_rope_scaling_type) kv.first;
+         }
+     }
+
+     return LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
+ }
+
+ // NOTE: avoid ever using this except for building the token_to_piece caches
+ static std::string llama_token_to_piece(const struct llama_model * model, llama_token token, bool special) {
+     std::string piece;
+     piece.resize(piece.capacity()); // using string internal cache
+     const int n_chars = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special);
+     if (n_chars < 0) {
+         piece.resize(-n_chars);
+         int check = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special);
+         LM_GGML_ASSERT(check == -n_chars);
+     }
+     else {
+         piece.resize(n_chars);
+     }
+
+     return piece;
+ }
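The negative return handled above is the usual llama.cpp convention: when the destination buffer is too small, llama_token_to_piece() returns the required length negated, and the caller resizes and retries. A minimal sketch of that two-pass pattern against the same entry point (the token id 42 and the 8-byte seed size are arbitrary):

    // Two-pass detokenization sketch; a negative return is the required
    // length, negated.
    std::string piece(8, '\0');
    int n = llama_token_to_piece(model, 42, &piece[0], piece.size(), 0, true);
    if (n < 0) {
        piece.resize(-n); // first attempt was too small; -n is the true size
        n = llama_token_to_piece(model, 42, &piece[0], piece.size(), 0, true);
    }
    piece.resize(n);      // trim to the actual length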
+
+ void llm_load_stats(llama_model_loader & ml, llama_model & model) {
+     model.n_elements = ml.n_elements;
+     model.n_bytes = ml.n_bytes;
+ }
+
+ void llm_load_arch(llama_model_loader & ml, llama_model & model) {
+     model.arch = ml.get_arch();
+     if (model.arch == LLM_ARCH_UNKNOWN) {
+         throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
+     }
+ }
+
+ void llm_load_hparams(llama_model_loader & ml, llama_model & model) {
+     auto & hparams = model.hparams;
+     const lm_gguf_context * ctx = ml.meta.get();
+
+     // get metadata as string
+     for (int i = 0; i < lm_gguf_get_n_kv(ctx); i++) {
+         enum lm_gguf_type type = lm_gguf_get_kv_type(ctx, i);
+         if (type == LM_GGUF_TYPE_ARRAY) {
+             continue;
+         }
+         const char * name = lm_gguf_get_key(ctx, i);
+         const std::string value = lm_gguf_kv_to_str(ctx, i);
+         model.lm_gguf_kv.emplace(name, value);
+     }
+
+     // get general kv
+     ml.get_key(LLM_KV_GENERAL_NAME, model.name, false);
+
+     // get hparams kv
+     ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab, false);
+
+     // everything past this point is not vocab-related
+     if (hparams.vocab_only) {
+         return;
+     }
+
+     ml.get_key(LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train);
+     ml.get_key(LLM_KV_EMBEDDING_LENGTH, hparams.n_embd);
+     ml.get_key(LLM_KV_BLOCK_COUNT, hparams.n_layer);
+     ml.get_key(LLM_KV_EXPERT_COUNT, hparams.n_expert, false);
+     ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false);
+
+     if (model.arch == LLM_ARCH_WAVTOKENIZER_DEC) {
+         ml.get_key(LLM_KV_FEATURES_LENGTH, hparams.n_embd_features);
+
+         ml.get_key(LLM_KV_POSNET_EMBEDDING_LENGTH, hparams.posnet.n_embd);
+         ml.get_key(LLM_KV_POSNET_BLOCK_COUNT, hparams.posnet.n_layer);
+
+         ml.get_key(LLM_KV_CONVNEXT_EMBEDDING_LENGTH, hparams.convnext.n_embd);
+         ml.get_key(LLM_KV_CONVNEXT_BLOCK_COUNT, hparams.convnext.n_layer);
+     }
+
+     LM_GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS);
+     LM_GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert);
+     if (hparams.n_expert > 0) {
+         LM_GGML_ASSERT(hparams.n_expert_used > 0);
+     } else {
+         LM_GGML_ASSERT(hparams.n_expert_used == 0);
+     }
+
+     // zero-out the array hparams
+     std::fill(hparams.n_head_arr.begin(), hparams.n_head_arr.end(), 0);
+     std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
+     std::fill(hparams.n_ff_arr.begin(), hparams.n_ff_arr.end(), 0);
+
+     ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff_arr, hparams.n_layer, false);
+     ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer, false);
+
+     // n_head_kv is optional, default to n_head
+     hparams.n_head_kv_arr = hparams.n_head_arr;
+
+     ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv_arr, hparams.n_layer, false);
+
+     bool rope_finetuned = false;
+     ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
+     hparams.rope_finetuned = rope_finetuned;
+
+     hparams.n_ctx_orig_yarn = hparams.n_ctx_train;
+     ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_ctx_orig_yarn, false);
+
+     // rope_freq_base (optional)
+     hparams.rope_freq_base_train = 10000.0f;
+     ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false);
+
+     std::string rope_scaling("linear");
+     ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false);
+     hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
+     LM_GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED);
+
+     // rope_freq_scale (inverse of the kv) is optional
+     float ropescale = 0.0f;
+     if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) {
+         // try the old key name
+         ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false);
+     }
+     hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
+
+     ml.get_key(LLM_KV_ROPE_SCALING_ATTN_FACTOR, hparams.rope_attn_factor, false);
+
+     // non-transformer models do not have attention heads
+     if (hparams.n_head() > 0) {
+         // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
+         // gpt-j n_rot = rotary_dim
+
+         hparams.n_embd_head_k = hparams.n_embd / hparams.n_head();
+         ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k, false);
+
+         hparams.n_embd_head_v = hparams.n_embd / hparams.n_head();
+         ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false);
+
+         // sanity check for n_rot (optional)
+         hparams.n_rot = hparams.n_embd_head_k;
+
+         ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);
+
+         if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_DECI || model.arch == LLM_ARCH_FALCON) {
+             if (hparams.n_rot != hparams.n_embd_head_k) {
+                 throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd_head_k));
+             }
+         }
+     } else {
+         hparams.n_rot = 0;
+         hparams.n_embd_head_k = 0;
+         hparams.n_embd_head_v = 0;
+     }
+
+     using e_model = llm_type; // TMP
+
+     // arch-specific KVs
+     switch (model.arch) {
+         case LLM_ARCH_LLAMA:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                 if (hparams.n_expert == 8) {
+                     switch (hparams.n_layer) {
+                         case 32: model.type = e_model::MODEL_8x7B; break;
+                         case 56: model.type = e_model::MODEL_8x22B; break;
+                         default: model.type = e_model::MODEL_UNKNOWN;
+                     }
+                 } else {
+                     switch (hparams.n_layer) {
+                         case 16: model.type = e_model::MODEL_1B; break; // Llama 3.2 1B
+                         case 22: model.type = e_model::MODEL_1B; break;
+                         case 26: model.type = e_model::MODEL_3B; break;
+                         case 28: model.type = e_model::MODEL_3B; break; // Llama 3.2 3B
+                         // granite uses a vocab with len 49152
+                         case 32: model.type = hparams.n_vocab == 49152 ? e_model::MODEL_3B : (hparams.n_vocab < 40000 ? e_model::MODEL_7B : e_model::MODEL_8B); break;
+                         case 36: model.type = e_model::MODEL_8B; break; // granite
+                         case 40: model.type = e_model::MODEL_13B; break;
+                         case 48: model.type = e_model::MODEL_34B; break;
+                         case 60: model.type = e_model::MODEL_30B; break;
+                         case 80: model.type = hparams.n_head() == hparams.n_head_kv() ? e_model::MODEL_65B : e_model::MODEL_70B; break;
+                         default: model.type = e_model::MODEL_UNKNOWN;
+                     }
+                 }
+             } break;
+         case LLM_ARCH_DECI:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 switch (hparams.n_layer) {
+                     case 32: model.type = e_model::MODEL_7B; break;
+                     case 80: model.type = e_model::MODEL_70B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_MINICPM:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale);
+                 ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale);
+                 ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
+
+                 switch (hparams.n_layer) {
+                     case 52: model.type = e_model::MODEL_1B; break;
+                     case 40: model.type = e_model::MODEL_2B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_MINICPM3:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
+                 ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
+
+                 switch (hparams.n_layer) {
+                     case 62: model.type = e_model::MODEL_4B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_GROK:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                 switch (hparams.n_layer) {
+                     case 64: model.type = e_model::MODEL_314B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_FALCON:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+
+                 switch (hparams.n_layer) {
+                     case 32: model.type = e_model::MODEL_7B; break;
+                     case 60: model.type = e_model::MODEL_40B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_BAICHUAN:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 switch (hparams.n_layer) {
+                     case 32: model.type = e_model::MODEL_7B; break;
+                     case 40: model.type = e_model::MODEL_13B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+
+                 if (model.type == e_model::MODEL_13B) {
+                     // TODO: become GGUF KV parameter
+                     hparams.f_max_alibi_bias = 8.0f;
+                 }
+             } break;
+         case LLM_ARCH_STARCODER:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 switch (hparams.n_layer) {
+                     case 24: model.type = e_model::MODEL_1B; break;
+                     case 36: model.type = e_model::MODEL_3B; break;
+                     case 42: model.type = e_model::MODEL_7B; break;
+                     case 40: model.type = e_model::MODEL_15B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_REFACT:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 switch (hparams.n_layer) {
+                     case 32: model.type = e_model::MODEL_1B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+
+                 // TODO: become GGUF KV parameter
+                 hparams.f_max_alibi_bias = 8.0f;
+             } break;
+         case LLM_ARCH_BERT:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
+                 ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
+                 ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
+
+                 switch (hparams.n_layer) {
+                     case 3:
+                         model.type = e_model::MODEL_17M; break; // bge-micro
+                     case 6:
+                         model.type = e_model::MODEL_22M; break; // MiniLM-L6
+                     case 12:
+                         switch (hparams.n_embd) {
+                             case 384: model.type = e_model::MODEL_33M; break; // MiniLM-L12, bge-small
+                             case 768: model.type = e_model::MODEL_109M; break; // bge-base
+                             default: model.type = e_model::MODEL_UNKNOWN;
+                         } break;
+                     case 24:
+                         model.type = e_model::MODEL_335M; break; // bge-large
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_JINA_BERT_V2:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
+                 ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
+                 ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
+                 hparams.f_max_alibi_bias = 8.0f;
+
+                 switch (hparams.n_layer) {
+                     case 4: model.type = e_model::MODEL_33M; break; // jina-embeddings-small
+                     case 12: model.type = e_model::MODEL_137M; break; // jina-embeddings-base
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_NOMIC_BERT:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
+                 ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
+                 ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type);
+
+                 if (hparams.n_layer == 12 && hparams.n_embd == 768) {
+                     model.type = e_model::MODEL_137M;
+                 }
+             } break;
+         case LLM_ARCH_BLOOM:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+
+                 switch (hparams.n_layer) {
+                     case 24: model.type = e_model::MODEL_1B; break;
+                     case 30:
+                         switch (hparams.n_embd) {
+                             case 2560: model.type = e_model::MODEL_3B; break;
+                             case 4096: model.type = e_model::MODEL_7B; break;
+                             default: model.type = e_model::MODEL_UNKNOWN;
+                         } break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+
+                 // TODO: become GGUF KV parameter
+                 hparams.f_max_alibi_bias = 8.0f;
+             } break;
+         case LLM_ARCH_MPT:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false);
+                 ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
+
+                 switch (hparams.n_layer) {
+                     case 32: model.type = e_model::MODEL_7B; break;
+                     case 48: model.type = e_model::MODEL_30B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_STABLELM:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+
+                 switch (hparams.n_layer) {
+                     case 24: model.type = e_model::MODEL_1B; break;
+                     case 32: model.type = e_model::MODEL_3B; break;
+                     case 40: model.type = e_model::MODEL_12B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_QWEN:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                 switch (hparams.n_layer) {
+                     case 32: model.type = e_model::MODEL_7B; break;
+                     case 40: model.type = e_model::MODEL_13B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_QWEN2VL:
+             {
+                 ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, true);
+             }
+             // fall through
+         case LLM_ARCH_QWEN2:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 switch (hparams.n_layer) {
+                     case 24: model.type = hparams.n_embd == 1024 ? e_model::MODEL_0_5B : e_model::MODEL_1B; break;
+                     case 28: model.type = hparams.n_embd == 1536 ? e_model::MODEL_1_5B : e_model::MODEL_7B; break;
+                     case 32: model.type = e_model::MODEL_7B; break;
+                     case 36: model.type = e_model::MODEL_3B; break;
+                     case 40: model.type = hparams.n_head() == 20 ? e_model::MODEL_4B : e_model::MODEL_13B; break;
+                     case 48: model.type = e_model::MODEL_14B; break;
+                     case 64: model.type = e_model::MODEL_32B; break;
+                     case 80: model.type = e_model::MODEL_70B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_QWEN2MOE:
+             {
+                 ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
+                 ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false);
+
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 switch (hparams.n_layer) {
+                     case 24: model.type = e_model::MODEL_A2_7B; break;
+                     case 28: model.type = e_model::MODEL_57B_A14B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_PHI2:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+
+                 switch (hparams.n_layer) {
+                     case 24: model.type = e_model::MODEL_1B; break;
+                     case 32: model.type = e_model::MODEL_3B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_PHI3:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                 switch (hparams.n_layer) {
+                     case 24: model.type = e_model::MODEL_1B; break;
+                     case 32: model.type = e_model::MODEL_3B; break;
+                     case 40: model.type = e_model::MODEL_14B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+
+                 // for backward compatibility ; see: https://github.com/ggerganov/llama.cpp/pull/8931
+                 if ((hparams.n_layer == 32 || hparams.n_layer == 40) && hparams.n_ctx_train == 4096) {
+                     // default value for Phi-3-mini-4k-instruct and Phi-3-medium-4k-instruct
+                     hparams.n_swa = 2047;
+                 } else if (hparams.n_layer == 32 && hparams.n_head_kv(0) == 32 && hparams.n_ctx_train == 131072) {
+                     // default value for Phi-3-mini-128k-instruct
+                     hparams.n_swa = 262144;
+                 } else if (hparams.n_layer == 40 && hparams.n_ctx_train == 131072) {
+                     // default value for Phi-3-medium-128k-instruct
+                     hparams.n_swa = 131072;
+                 }
+                 bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
+                 if (!found_swa && hparams.n_swa == 0) {
+                     throw std::runtime_error("invalid value for sliding_window");
+                 }
+             } break;
+         case LLM_ARCH_PLAMO:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                 switch (hparams.n_layer) {
+                     case 40: model.type = e_model::MODEL_13B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_GPT2:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 switch (hparams.n_layer) {
+                     case 12: model.type = e_model::MODEL_SMALL; break;
+                     case 24: model.type = e_model::MODEL_MEDIUM; break;
+                     case 36: model.type = e_model::MODEL_LARGE; break;
+                     case 48: model.type = e_model::MODEL_XL; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_CODESHELL:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 switch (hparams.n_layer) {
+                     case 42: model.type = e_model::MODEL_7B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_ORION:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+
+                 switch (hparams.n_layer) {
+                     case 40: model.type = e_model::MODEL_14B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_INTERNLM2:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 switch (hparams.n_layer) {
+                     case 32: model.type = e_model::MODEL_7B; break;
+                     case 48: model.type = e_model::MODEL_20B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_GEMMA:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                 switch (hparams.n_layer) {
+                     case 18: model.type = e_model::MODEL_2B; break;
+                     case 28: model.type = e_model::MODEL_7B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_GEMMA2:
+             {
+                 hparams.n_swa = 4096; // default value of gemma 2
+                 ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping, false);
+                 ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping, false);
+                 hparams.attn_soft_cap = true;
+
+                 switch (hparams.n_layer) {
+                     case 26: model.type = e_model::MODEL_2B; break;
+                     case 42: model.type = e_model::MODEL_9B; break;
+                     case 46: model.type = e_model::MODEL_27B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_STARCODER2:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 switch (hparams.n_layer) {
+                     case 30: model.type = e_model::MODEL_3B; break;
+                     case 32: model.type = e_model::MODEL_7B; break;
+                     case 40: model.type = e_model::MODEL_15B; break;
+                     case 52: model.type = e_model::MODEL_20B; break; // granite
+                     case 88: model.type = e_model::MODEL_34B; break; // granite
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_MAMBA:
+             {
+                 ml.get_key(LLM_KV_SSM_CONV_KERNEL, hparams.ssm_d_conv);
+                 ml.get_key(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner);
+                 ml.get_key(LLM_KV_SSM_STATE_SIZE, hparams.ssm_d_state);
+                 ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
+                 ml.get_key(LLM_KV_SSM_DT_B_C_RMS, hparams.ssm_dt_b_c_rms, false);
+
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                 switch (hparams.n_layer) {
+                     case 24:
+                         switch (hparams.n_embd) {
+                             case 768: model.type = e_model::MODEL_SMALL; break;
+                             default: model.type = e_model::MODEL_UNKNOWN;
+                         } break;
+                     case 48:
+                         switch (hparams.n_embd) {
+                             case 1024: model.type = e_model::MODEL_MEDIUM; break;
+                             case 1536: model.type = e_model::MODEL_LARGE; break;
+                             case 2048: model.type = e_model::MODEL_XL; break;
+                             default: model.type = e_model::MODEL_UNKNOWN;
+                         } break;
+                     case 64:
+                         switch (hparams.n_embd) {
+                             case 2560: model.type = e_model::MODEL_3B; break;
+                             default: model.type = e_model::MODEL_UNKNOWN;
+                         } break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_XVERSE:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 switch (hparams.n_layer) {
+                     case 32: model.type = e_model::MODEL_7B; break;
+                     case 40: model.type = e_model::MODEL_13B; break;
+                     case 80: model.type = e_model::MODEL_65B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_COMMAND_R:
+             {
+                 ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 switch (hparams.n_layer) {
+                     case 40: model.type = e_model::MODEL_35B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_COHERE2:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
+                 ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 switch (hparams.n_layer) {
+                     case 32: model.type = e_model::MODEL_8B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_DBRX:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv);
+
+                 switch (hparams.n_layer) {
+                     case 40: model.type = e_model::MODEL_16x12B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_OLMO:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false);
+
+                 switch (hparams.n_layer) {
+                     case 22: model.type = e_model::MODEL_1B; break;
+                     case 32: model.type = e_model::MODEL_7B; break;
+                     case 80: model.type = e_model::MODEL_70B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_OLMO2:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                 switch (hparams.n_layer) {
+                     case 16: model.type = e_model::MODEL_1B; break;
+                     case 32: model.type = e_model::MODEL_7B; break;
+                     case 40: model.type = e_model::MODEL_13B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_OLMOE:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 switch (hparams.n_layer) {
+                     case 16: model.type = e_model::MODEL_A1_7B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_OPENELM:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                 switch (hparams.n_layer) {
+                     case 16: model.type = e_model::MODEL_270M; break;
+                     case 20: model.type = e_model::MODEL_450M; break;
+                     case 28: model.type = e_model::MODEL_1B; break;
+                     case 36: model.type = e_model::MODEL_3B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_GPTNEOX:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 ml.get_key(LLM_KV_USE_PARALLEL_RESIDUAL, hparams.use_par_res);
+                 switch (hparams.n_layer) {
+                     case 6:
+                         switch (hparams.n_ff()) {
+                             case 512: model.type = e_model::MODEL_14M; break;
+                             case 2048: model.type = e_model::MODEL_70M; break;
+                             default: model.type = e_model::MODEL_UNKNOWN;
+                         } break;
+                     case 12:
+                         switch (hparams.n_ff()) {
+                             case 3072: model.type = e_model::MODEL_160M; break;
+                             default: model.type = e_model::MODEL_UNKNOWN;
+                         } break;
+                     case 16:
+                         switch (hparams.n_ff()) {
+                             case 8192: model.type = e_model::MODEL_1B; break;
+                             default: model.type = e_model::MODEL_UNKNOWN;
+                         } break;
+                     case 24:
+                         switch (hparams.n_ff()) {
+                             case 4096: model.type = e_model::MODEL_410M; break;
+                             case 8192: model.type = e_model::MODEL_1_4B; break;
+                             default: model.type = e_model::MODEL_UNKNOWN;
+                         } break;
+                     case 32:
+                         switch (hparams.n_ff()) {
+                             case 10240: model.type = e_model::MODEL_2_8B; break;
+                             case 16384: model.type = e_model::MODEL_6_9B; break;
+                             default: model.type = e_model::MODEL_UNKNOWN;
+                         } break;
+                     case 36:
+                         switch (hparams.n_ff()) {
+                             case 20480: model.type = e_model::MODEL_12B; break;
+                             default: model.type = e_model::MODEL_UNKNOWN;
+                         } break;
+                     case 44:
+                         switch (hparams.n_ff()) {
+                             case 24576: model.type = e_model::MODEL_20B; break;
+                             default: model.type = e_model::MODEL_UNKNOWN;
+                         } break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_ARCTIC:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                 if (hparams.n_expert == 128) {
+                     switch (hparams.n_layer) {
+                         case 35: model.type = e_model::MODEL_10B_128x3_66B; break;
+                         default: model.type = e_model::MODEL_UNKNOWN;
+                     }
+                 } else {
+                     model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_DEEPSEEK:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
+                 ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
+                 ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
+                 ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
+
+                 switch (hparams.n_layer) {
+                     case 28: model.type = e_model::MODEL_20B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_DEEPSEEK2:
+             {
+                 bool is_lite = (hparams.n_layer == 27);
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
+                 if (!is_lite) {
+                     ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
+                 }
+                 ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
+                 ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
+                 ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
+                 ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
+                 ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false);
+                 ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, false);
+                 if (hparams.expert_gating_func == LLAMA_EXPERT_GATING_FUNC_TYPE_NONE) {
+                     // for compatibility with existing DeepSeek V2 and V2.5 GGUFs
+                     // that have no expert_gating_func model parameter set
+                     hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX;
+                 }
+                 ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul);
+
+                 switch (hparams.n_layer) {
+                     case 27: model.type = e_model::MODEL_16B; break;
+                     case 60: model.type = e_model::MODEL_236B; break;
+                     case 61: model.type = e_model::MODEL_671B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_CHATGLM:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 switch (hparams.n_layer) {
+                     case 28: model.type = e_model::MODEL_6B; break;
+                     case 40: model.type = e_model::MODEL_9B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_BITNET:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                 switch (hparams.n_layer) {
+                     case 26: model.type = e_model::MODEL_3B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_T5:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
+
+                 uint32_t dec_start_token_id;
+                 if (ml.get_key(LLM_KV_DECODER_START_TOKEN_ID, dec_start_token_id, false)) {
+                     hparams.dec_start_token_id = dec_start_token_id;
+                 }
+
+                 switch (hparams.n_layer) {
+                     case 6: model.type = e_model::MODEL_60M; break; // t5-small
+                     case 8: model.type = e_model::MODEL_80M; break; // flan-t5-small
+                     case 12:
+                         switch (hparams.n_ff()) {
+                             case 3072: model.type = e_model::MODEL_220M; break; // t5-base
+                             case 2048: model.type = e_model::MODEL_250M; break; // flan-t5-base
+                             default: model.type = e_model::MODEL_UNKNOWN;
+                         } break;
+                     case 24:
+                         switch (hparams.n_ff()) {
+                             case 4096: model.type = e_model::MODEL_770M; break; // t5-large
+                             case 2816: model.type = e_model::MODEL_780M; break; // flan-t5-large
+                             case 16384: model.type = e_model::MODEL_3B; break; // t5-3b
+                             case 5120: model.type = e_model::MODEL_3B; break; // flan-t5-xl
+                             case 65536: model.type = e_model::MODEL_11B; break; // t5-11b
+                             case 10240: model.type = e_model::MODEL_11B; break; // flan-t5-xxl
+                             default: model.type = e_model::MODEL_UNKNOWN;
+                         } break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_T5ENCODER:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
+                 model.type = e_model::MODEL_UNKNOWN;
+             } break;
+         case LLM_ARCH_JAIS:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
+
+                 switch (hparams.n_layer) {
+                     case 24: model.type = e_model::MODEL_1_3B; break;
+                     case 40: model.type = e_model::MODEL_13B; break;
+                     /* TODO: add variants */
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_NEMOTRON:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 switch (hparams.n_layer) {
+                     case 32: model.type = e_model::MODEL_4B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_EXAONE:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                 switch (hparams.n_layer) {
+                     case 32: model.type = e_model::MODEL_8B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_RWKV6:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 ml.get_key(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size);
+                 ml.get_key(LLM_KV_TIME_MIX_EXTRA_DIM, hparams.time_mix_extra_dim);
+                 ml.get_key(LLM_KV_TIME_DECAY_EXTRA_DIM, hparams.time_decay_extra_dim);
+                 ml.get_key(LLM_KV_RESCALE_EVERY_N_LAYERS, hparams.rescale_every_n_layers, false);
+
+                 switch (hparams.n_layer) {
+                     case 24: model.type = e_model::MODEL_1_6B; break;
+                     case 32:
+                         switch (hparams.n_embd) {
+                             case 2560: model.type = e_model::MODEL_3B; break;
+                             case 4096: model.type = e_model::MODEL_7B; break;
+                             default: model.type = e_model::MODEL_UNKNOWN;
+                         } break;
+                     case 61: model.type = e_model::MODEL_14B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_GRANITE:
+         case LLM_ARCH_GRANITE_MOE:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
+                 ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale);
+                 ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale);
+                 ml.get_key(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale);
+
+                 switch (hparams.n_layer) {
+                     case 32: model.type = e_model::MODEL_3B; break;
+                     case 40: model.type = e_model::MODEL_3B; break;
+                     // Add additional layer/vocab/etc checks here for other model sizes
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_CHAMELEON:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                 hparams.f_norm_eps = 1e-5; // eps for qk-norm, torch default
+                 ml.get_key(LLM_KV_SWIN_NORM, hparams.swin_norm);
+
+                 switch (hparams.n_layer) {
+                     case 32: model.type = e_model::MODEL_7B; break;
+                     case 48: model.type = e_model::MODEL_34B; break;
+                     default: model.type = e_model::MODEL_UNKNOWN;
+                 }
+             } break;
+         case LLM_ARCH_WAVTOKENIZER_DEC:
+             {
+                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                 ml.get_key(LLM_KV_ATTENTION_GROUPNORM_EPS, hparams.f_norm_group_eps);
+                 ml.get_key(LLM_KV_ATTENTION_GROUPNORM_GROUPS, hparams.n_norm_groups);
+                 ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
+             } break;
+         default: throw std::runtime_error("unsupported model architecture");
+     }
+
+     model.ftype = ml.ftype;
+
+     if (hparams.f_max_alibi_bias > 0.0f) {
+         hparams.use_alibi = true;
+     }
+
+     hparams.rope_type = llama_rope_type(&model);
+ }
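One subtlety worth a worked example from llm_load_hparams() above: the GGUF key stores the rope scaling *factor*, while hparams keeps its reciprocal. A stored factor of 4.0 therefore yields rope_freq_scale_train = 1.0/4.0 = 0.25, and an absent or zero key leaves it at the default 1.0.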
1112
+
1113
+ void llm_load_vocab(llama_model_loader & ml, llama_model & model) {
1114
+ auto & vocab = model.vocab;
1115
+
1116
+ struct lm_gguf_context * ctx = ml.meta.get();
1117
+
1118
+ const auto kv = LLM_KV(model.arch);
1119
+
1120
+ // determine vocab type
1121
+ {
1122
+ std::string tokenizer_model;
1123
+ std::string tokenizer_pre;
1124
+
1125
+ ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_model);
1126
+ ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false);
1127
+
1128
+ if (tokenizer_model == "no_vocab" || tokenizer_model == "none") {
1129
+ vocab.type = LLAMA_VOCAB_TYPE_NONE;
1130
+
1131
+ // default special tokens
1132
+ vocab.special_bos_id = LLAMA_TOKEN_NULL;
1133
+ vocab.special_eos_id = LLAMA_TOKEN_NULL;
1134
+ vocab.special_unk_id = LLAMA_TOKEN_NULL;
1135
+ vocab.special_sep_id = LLAMA_TOKEN_NULL;
1136
+ vocab.special_pad_id = LLAMA_TOKEN_NULL;
1137
+ vocab.special_cls_id = LLAMA_TOKEN_NULL;
1138
+ vocab.special_mask_id = LLAMA_TOKEN_NULL;
1139
+ vocab.linefeed_id = LLAMA_TOKEN_NULL;
1140
+
1141
+ // read vocab size from metadata
1142
+ if (!ml.get_key(LLM_KV_VOCAB_SIZE, vocab.n_vocab, false)) {
1143
+ vocab.n_vocab = 0;
1144
+ LLAMA_LOG_WARN("%s: there is no vocab_size in metadata, vocab.n_vocab will be set to %u\n", __func__, vocab.n_vocab);
1145
+ }
1146
+ return;
1147
+ }
1148
+
1149
+ if (tokenizer_model == "llama") {
1150
+ vocab.type = LLAMA_VOCAB_TYPE_SPM;
1151
+
1152
+ // default special tokens
1153
+ vocab.special_bos_id = 1;
1154
+ vocab.special_eos_id = 2;
1155
+ vocab.special_unk_id = 0;
1156
+ vocab.special_sep_id = LLAMA_TOKEN_NULL;
1157
+ vocab.special_pad_id = LLAMA_TOKEN_NULL;
1158
+ vocab.special_cls_id = LLAMA_TOKEN_NULL;
1159
+ vocab.special_mask_id = LLAMA_TOKEN_NULL;
1160
+ } else if (tokenizer_model == "bert") {
1161
+ vocab.type = LLAMA_VOCAB_TYPE_WPM;
1162
+
1163
+ // default special tokens
1164
+ vocab.special_bos_id = LLAMA_TOKEN_NULL;
1165
+ vocab.special_eos_id = LLAMA_TOKEN_NULL;
1166
+ vocab.special_unk_id = 100;
1167
+ vocab.special_sep_id = 102;
1168
+ vocab.special_pad_id = 0;
1169
+ vocab.special_cls_id = 101;
1170
+ vocab.special_mask_id = 103;
1171
+ } else if (tokenizer_model == "gpt2") {
1172
+ vocab.type = LLAMA_VOCAB_TYPE_BPE;
1173
+
1174
+ // read bpe merges and populate bpe ranks
1175
+ const int merges_keyidx = lm_gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
1176
+ if (merges_keyidx == -1) {
1177
+ throw std::runtime_error("cannot find tokenizer merges in model file\n");
1178
+ }
1179
+
1180
+ const int n_merges = lm_gguf_get_arr_n(ctx, merges_keyidx);
1181
+ for (int i = 0; i < n_merges; i++) {
1182
+ const std::string word = lm_gguf_get_arr_str(ctx, merges_keyidx, i);
1183
+ LM_GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0);
1184
+
1185
+ std::string first;
1186
+ std::string second;
1187
+
1188
+ const size_t pos = word.find(' ', 1);
1189
+
1190
+ if (pos != std::string::npos) {
1191
+ first = word.substr(0, pos);
1192
+ second = word.substr(pos + 1);
1193
+ }
1194
+
1195
+ vocab.bpe_ranks.emplace(std::make_pair(first, second), i);
1196
+ }
1197
+
1198
+ // default special tokens
1199
+ vocab.special_bos_id = 11;
1200
+ vocab.special_eos_id = 11;
1201
+ vocab.special_unk_id = LLAMA_TOKEN_NULL;
1202
+ vocab.special_sep_id = LLAMA_TOKEN_NULL;
1203
+ vocab.special_pad_id = LLAMA_TOKEN_NULL;
1204
+ vocab.special_cls_id = LLAMA_TOKEN_NULL;
1205
+ vocab.special_mask_id = LLAMA_TOKEN_NULL;
1206
+ } else if (tokenizer_model == "t5") {
1207
+ vocab.type = LLAMA_VOCAB_TYPE_UGM;
1208
+
1209
+ // default special tokens
1210
+ vocab.special_bos_id = LLAMA_TOKEN_NULL;
1211
+ vocab.special_eos_id = 1;
1212
+ vocab.special_unk_id = 2;
1213
+ vocab.special_sep_id = LLAMA_TOKEN_NULL;
1214
+ vocab.special_pad_id = 0;
1215
+ vocab.special_cls_id = LLAMA_TOKEN_NULL;
1216
+ vocab.special_mask_id = LLAMA_TOKEN_NULL;
1217
+
1218
+ const int precompiled_charsmap_keyidx = lm_gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP).c_str());
1219
+ if (precompiled_charsmap_keyidx != -1) {
1220
+ size_t n_precompiled_charsmap = lm_gguf_get_arr_n(ctx, precompiled_charsmap_keyidx);
1221
+ const char * precompiled_charsmap = (const char *) lm_gguf_get_arr_data(ctx, precompiled_charsmap_keyidx);
1222
+ vocab.precompiled_charsmap.assign(precompiled_charsmap, precompiled_charsmap + n_precompiled_charsmap);
1223
+ #ifdef IS_BIG_ENDIAN
1224
+ // correct endiannes of data in precompiled_charsmap binary blob
1225
+ uint32_t * xcda_blob_size = (uint32_t *) &vocab.precompiled_charsmap[0];
1226
+ *xcda_blob_size = __builtin_bswap32(*xcda_blob_size);
1227
+ assert(*xcda_blob_size + sizeof(uint32_t) < n_precompiled_charsmap);
1228
+ size_t xcda_array_size = *xcda_blob_size / sizeof(uint32_t);
1229
+ uint32_t * xcda_array = (uint32_t *) &vocab.precompiled_charsmap[sizeof(uint32_t)];
1230
+ for (size_t i = 0; i < xcda_array_size; ++i) {
1231
+ xcda_array[i] = __builtin_bswap32(xcda_array[i]);
1232
+ }
1233
+ #endif
1234
+ }
1235
+ } else if (tokenizer_model == "rwkv") {
1236
+ vocab.type = LLAMA_VOCAB_TYPE_RWKV;
1237
+
1238
+ // default special tokens
1239
+ vocab.special_bos_id = LLAMA_TOKEN_NULL;
1240
+ vocab.special_eos_id = LLAMA_TOKEN_NULL;
1241
+ vocab.special_unk_id = LLAMA_TOKEN_NULL;
1242
+ vocab.special_sep_id = LLAMA_TOKEN_NULL;
1243
+ vocab.special_pad_id = LLAMA_TOKEN_NULL;
1244
+ } else {
1245
+ throw std::runtime_error(format("unknown tokenizer: '%s'", tokenizer_model.c_str()));
1246
+ }
1247
+
1248
+ // for now, only BPE models have pre-tokenizers
1249
+ if (vocab.type == LLAMA_VOCAB_TYPE_BPE) {
1250
+ vocab.tokenizer_add_space_prefix = false;
1251
+ vocab.tokenizer_clean_spaces = true;
1252
+ if (tokenizer_pre.empty()) {
1253
+ LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__);
1254
+ LLAMA_LOG_WARN("%s: \n", __func__);
1255
+ LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
1256
+ LLAMA_LOG_WARN("%s: GENERATION QUALITY WILL BE DEGRADED! \n", __func__);
1257
+ LLAMA_LOG_WARN("%s: CONSIDER REGENERATING THE MODEL \n", __func__);
1258
+ LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
1259
+ LLAMA_LOG_WARN("%s: \n", __func__);
1260
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
1261
+ } else if (tokenizer_pre == "default") {
1262
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
1263
+ } else if (
1264
+ tokenizer_pre == "llama3" ||
1265
+ tokenizer_pre == "llama-v3" ||
1266
+ tokenizer_pre == "llama-bpe"||
1267
+ tokenizer_pre == "falcon3") {
1268
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_LLAMA3;
1269
+ vocab.tokenizer_ignore_merges = true;
1270
+ vocab.tokenizer_add_bos = true;
1271
+ } else if (
1272
+ tokenizer_pre == "deepseek-llm") {
1273
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM;
1274
+ vocab.tokenizer_clean_spaces = false;
1275
+ } else if (
1276
+ tokenizer_pre == "deepseek-coder") {
1277
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER;
1278
+ vocab.tokenizer_clean_spaces = false;
1279
+ } else if (
1280
+ tokenizer_pre == "deepseek-v3") {
1281
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM;
1282
+ vocab.tokenizer_clean_spaces = false;
1283
+ } else if (
1284
+ tokenizer_pre == "falcon") {
1285
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_FALCON;
1286
+ } else if (
1287
+ tokenizer_pre == "mpt") {
1288
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MPT;
1289
+ } else if (
1290
+ tokenizer_pre == "starcoder") {
1291
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STARCODER;
1292
+ } else if (
1293
+ tokenizer_pre == "gpt-2" ||
1294
+ tokenizer_pre == "phi-2" ||
1295
+ tokenizer_pre == "jina-es" ||
1296
+ tokenizer_pre == "jina-de" ||
1297
+ tokenizer_pre == "gigachat" ||
1298
+ tokenizer_pre == "jina-v1-en" ||
1299
+ tokenizer_pre == "jina-v2-es" ||
1300
+ tokenizer_pre == "jina-v2-de" ||
1301
+ tokenizer_pre == "jina-v2-code" ||
1302
+ tokenizer_pre == "roberta-bpe") {
1303
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2;
1304
+ } else if (
1305
+ tokenizer_pre == "refact") {
1306
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_REFACT;
1307
+ } else if (
1308
+ tokenizer_pre == "command-r") {
1309
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
1310
+ vocab.tokenizer_clean_spaces = false;
1311
+ } else if (
1312
+ tokenizer_pre == "qwen2") {
1313
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2;
1314
+ vocab.tokenizer_clean_spaces = false;
1315
+ } else if (
1316
+ tokenizer_pre == "stablelm2") {
1317
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STABLELM2;
1318
+ } else if (
1319
+ tokenizer_pre == "olmo") {
1320
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO;
1321
+ } else if (
1322
+ tokenizer_pre == "dbrx") {
1323
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
1324
+ } else if (
1325
+ tokenizer_pre == "smaug-bpe") {
1326
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMAUG;
1327
+ } else if (
1328
+ tokenizer_pre == "poro-chat") {
1329
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO;
1330
+ vocab.tokenizer_clean_spaces = false;
1331
+ } else if (
1332
+ tokenizer_pre == "chatglm-bpe") {
1333
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHATGLM4;
1334
+ vocab.special_bos_id = LLAMA_TOKEN_NULL;
1335
+ } else if (
1336
+ tokenizer_pre == "viking") {
1337
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_VIKING;
1338
+ vocab.tokenizer_clean_spaces = false;
1339
+ } else if (
1340
+ tokenizer_pre == "jais") {
1341
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS;
1342
+ } else if (
1343
+ tokenizer_pre == "tekken") {
1344
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_TEKKEN;
1345
+ vocab.tokenizer_clean_spaces = false;
1346
+ vocab.tokenizer_ignore_merges = true;
1347
+ vocab.tokenizer_add_bos = true;
1348
+ } else if (
1349
+ tokenizer_pre == "smollm") {
1350
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMOLLM;
1351
+ vocab.tokenizer_clean_spaces = false;
1352
+ } else if (
1353
+ tokenizer_pre == "codeshell") {
1354
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CODESHELL;
1355
+ } else if (
1356
+ tokenizer_pre == "bloom") {
1357
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_BLOOM;
1358
+ } else if (
1359
+ tokenizer_pre == "gpt3-finnish") {
1360
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH;
1361
+ } else if (
1362
+ tokenizer_pre == "exaone") {
1363
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_EXAONE;
1364
+ } else if (
1365
+ tokenizer_pre == "chameleon") {
1366
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHAMELEON;
1367
+ vocab.tokenizer_add_bos = true;
1368
+ vocab.tokenizer_clean_spaces = false;
1369
+ } else if (
1370
+ tokenizer_pre == "minerva-7b") {
1371
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MINERVA;
1372
+ } else if (
1373
+ tokenizer_pre == "megrez") {
1374
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2;
1375
+ } else {
1376
+ throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
1377
+ }
1378
+ } else if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
1379
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
1380
+ vocab.tokenizer_add_space_prefix = true;
1381
+ vocab.tokenizer_clean_spaces = false;
1382
+ vocab.tokenizer_add_bos = true;
1383
+ vocab.tokenizer_add_eos = false;
1384
+ } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
1385
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
1386
+ vocab.tokenizer_add_space_prefix = false;
1387
+ vocab.tokenizer_clean_spaces = true;
1388
+ vocab.tokenizer_add_bos = true;
1389
+ vocab.tokenizer_add_eos = false;
1390
+ } else if (vocab.type == LLAMA_VOCAB_TYPE_UGM) {
1391
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
1392
+ vocab.tokenizer_add_bos = false;
1393
+ vocab.tokenizer_add_eos = true;
1394
+ } else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) {
1395
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
1396
+ vocab.tokenizer_add_space_prefix = false;
1397
+ vocab.tokenizer_clean_spaces = false;
1398
+ vocab.tokenizer_add_bos = false;
1399
+ vocab.tokenizer_add_eos = false;
1400
+ } else {
1401
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
1402
+ }
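+
+ // NOTE: illustrative only. The dispatch above is keyed on the LLM_KV_TOKENIZER_PRE
+ // ("tokenizer.ggml.pre") metadata string, so an unlisted BPE pre-tokenizer fails fast
+ // instead of tokenizing incorrectly:
+ //
+ //   tokenizer_pre = "my-new-tokenizer";  // hypothetical value
+ //   // -> throws: unknown pre-tokenizer type: 'my-new-tokenizer'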
1403
+
1404
+ ml.get_key(LLM_KV_TOKENIZER_ADD_PREFIX, vocab.tokenizer_add_space_prefix, false);
1405
+ ml.get_key(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, vocab.tokenizer_remove_extra_whitespaces, false);
1406
+ }
1407
+
1408
+ const int token_idx = lm_gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
1409
+ if (token_idx == -1) {
1410
+ throw std::runtime_error("cannot find tokenizer vocab in model file\n");
1411
+ }
1412
+
1413
+ const float * scores = nullptr;
1414
+ const int score_idx = lm_gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str());
1415
+ if (score_idx != -1) {
1416
+ scores = (const float * ) lm_gguf_get_arr_data(ctx, score_idx);
1417
+ }
1418
+
1419
+ const int * toktypes = nullptr;
1420
+ const int toktype_idx = lm_gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str());
1421
+ if (toktype_idx != -1) {
1422
+ toktypes = (const int * ) lm_gguf_get_arr_data(ctx, toktype_idx);
1423
+ }
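+
+ // NOTE: minimal sketch, assuming only the lm_gguf_* accessors used above. scores and
+ // toktypes are optional parallel arrays; the same find-then-read pattern generalizes
+ // to any GGUF array ("some.key" is a hypothetical key):
+ //
+ //   const int idx = lm_gguf_find_key(ctx, "some.key");      // -1 when absent
+ //   if (idx != -1) {
+ //       const void *   data = lm_gguf_get_arr_data(ctx, idx);
+ //       const uint32_t n    = lm_gguf_get_arr_n(ctx, idx);
+ //   }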
1424
+
1425
+ const uint32_t n_vocab = lm_gguf_get_arr_n(ctx, token_idx);
1426
+
1427
+ vocab.n_vocab = n_vocab;
1428
+ vocab.id_to_token.resize(n_vocab);
1429
+
1430
+ for (uint32_t i = 0; i < n_vocab; i++) {
1431
+ std::string word = lm_gguf_get_arr_str(ctx, token_idx, i);
1432
+ if (word.empty()) {
1433
+ LLAMA_LOG_WARN("%s: empty token at index %u\n", __func__, i);
1434
+ word = "[EMPTY_" + std::to_string(i) + "]";
1435
+ }
1436
+
1437
+ vocab.token_to_id[word] = i;
1438
+ vocab.max_token_len = std::max(vocab.max_token_len, (int) word.size());
1439
+
1440
+ auto & token_data = vocab.id_to_token[i];
1441
+ token_data.text = std::move(word);
1442
+ token_data.score = scores ? scores[i] : 0.0f;
1443
+ token_data.attr = LLAMA_TOKEN_ATTR_NORMAL;
1444
+
1445
+ if (toktypes) { //TODO: remove, required until per token attributes are available from GGUF file
1446
+ switch(toktypes[i]) {
1447
+ case LLAMA_TOKEN_TYPE_UNKNOWN: token_data.attr = LLAMA_TOKEN_ATTR_UNKNOWN; break;
1448
+ case LLAMA_TOKEN_TYPE_UNUSED: token_data.attr = LLAMA_TOKEN_ATTR_UNUSED; break;
1449
+ case LLAMA_TOKEN_TYPE_NORMAL: token_data.attr = LLAMA_TOKEN_ATTR_NORMAL; break;
1450
+ case LLAMA_TOKEN_TYPE_CONTROL: token_data.attr = LLAMA_TOKEN_ATTR_CONTROL; break;
1451
+ case LLAMA_TOKEN_TYPE_USER_DEFINED: token_data.attr = LLAMA_TOKEN_ATTR_USER_DEFINED; break;
1452
+ case LLAMA_TOKEN_TYPE_BYTE: token_data.attr = LLAMA_TOKEN_ATTR_BYTE; break;
1453
+ case LLAMA_TOKEN_TYPE_UNDEFINED: token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED; break;
1454
+ default: token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED; break;
1455
+ }
1456
+ }
1457
+ }
1458
+ LM_GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size());
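+
+ // NOTE: the size equality above is what catches duplicated token text: a repeated
+ // piece would collapse two ids onto one token_to_id entry, leaving the reverse map
+ // smaller than id_to_token.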
1459
+
1460
+ vocab.init_tokenizer();
1461
+
1462
+ // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
1463
+ if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
1464
+ try {
1465
+ vocab.linefeed_id = llama_byte_to_token_impl(vocab, '\n');
1466
+ } catch (const std::exception & e) {
1467
+ LLAMA_LOG_WARN("%s: SPM vocabulary, but newline token not found: %s! Using special_pad_id instead.", __func__, e.what());
1468
+ vocab.linefeed_id = vocab.special_pad_id;
1469
+ }
1470
+ } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
1471
+ vocab.linefeed_id = vocab.special_pad_id;
1472
+ } else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) {
1473
+ const std::vector<int> ids = llama_tokenize_internal(vocab, "\n", false);
1474
+ LM_GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
1475
+ vocab.linefeed_id = ids[0];
1476
+ } else {
1477
+ const std::vector<int> ids = llama_tokenize_internal(vocab, "\xC4\x8A", false); // U+010A
1478
+
1479
+ //LM_GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
1480
+ if (ids.empty()) {
1481
+ LLAMA_LOG_WARN("%s: model vocab missing newline token, using special_pad_id instead\n", __func__);
1482
+ vocab.linefeed_id = vocab.special_pad_id;
1483
+ } else {
1484
+ vocab.linefeed_id = ids[0];
1485
+ }
1486
+ }
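+
+ // NOTE: "\xC4\x8A" is the UTF-8 encoding of U+010A. GPT-2 style byte-level BPE remaps
+ // the unprintable raw byte 0x0A ('\n') to codepoint U+010A, so tokenizing that
+ // codepoint recovers the vocab entry that renders a literal newline.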
1487
+
1488
+ // special tokens
1489
+ {
1490
+ const std::vector<std::pair<enum llm_kv, int32_t &>> special_token_types = {
1491
+ { LLM_KV_TOKENIZER_BOS_ID, vocab.special_bos_id },
1492
+ { LLM_KV_TOKENIZER_EOS_ID, vocab.special_eos_id },
1493
+ { LLM_KV_TOKENIZER_EOT_ID, vocab.special_eot_id },
1494
+ { LLM_KV_TOKENIZER_EOM_ID, vocab.special_eom_id },
1495
+ { LLM_KV_TOKENIZER_UNK_ID, vocab.special_unk_id },
1496
+ { LLM_KV_TOKENIZER_SEP_ID, vocab.special_sep_id },
1497
+ { LLM_KV_TOKENIZER_PAD_ID, vocab.special_pad_id },
1498
+ { LLM_KV_TOKENIZER_CLS_ID, vocab.special_cls_id },
1499
+ { LLM_KV_TOKENIZER_MASK_ID, vocab.special_mask_id },
1500
+ { LLM_KV_TOKENIZER_FIM_PRE_ID, vocab.special_fim_pre_id },
1501
+ { LLM_KV_TOKENIZER_FIM_SUF_ID, vocab.special_fim_suf_id },
1502
+ { LLM_KV_TOKENIZER_FIM_MID_ID, vocab.special_fim_mid_id },
1503
+ { LLM_KV_TOKENIZER_FIM_PAD_ID, vocab.special_fim_pad_id },
1504
+ { LLM_KV_TOKENIZER_FIM_REP_ID, vocab.special_fim_rep_id },
1505
+ { LLM_KV_TOKENIZER_FIM_SEP_ID, vocab.special_fim_sep_id },
1506
+
1507
+ // deprecated
1508
+ { LLM_KV_TOKENIZER_PREFIX_ID, vocab.special_fim_pre_id },
1509
+ { LLM_KV_TOKENIZER_SUFFIX_ID, vocab.special_fim_suf_id },
1510
+ { LLM_KV_TOKENIZER_MIDDLE_ID, vocab.special_fim_mid_id },
1511
+ };
1512
+
1513
+ for (const auto & it : special_token_types) {
1514
+ const std::string & key = kv(std::get<0>(it));
1515
+ int32_t & id = std::get<1>(it);
1516
+
1517
+ uint32_t new_id;
1518
+ if (!ml.get_key(std::get<0>(it), new_id, false)) {
1519
+ continue;
1520
+ }
1521
+ if (new_id >= vocab.id_to_token.size()) {
1522
+ LLAMA_LOG_WARN("%s: bad special token: '%s' = %ud, using default id %d\n",
1523
+ __func__, key.c_str(), new_id, id);
1524
+ } else {
1525
+ id = new_id;
1526
+ }
1527
+ }
1528
+
1529
+ // Handle add_bos_token and add_eos_token
1530
+ {
1531
+ bool temp = true;
1532
+
1533
+ if (ml.get_key(LLM_KV_TOKENIZER_ADD_BOS, temp, false)) {
1534
+ vocab.tokenizer_add_bos = temp;
1535
+ }
1536
+ if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) {
1537
+ vocab.tokenizer_add_eos = temp;
1538
+ }
1539
+ }
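+
+ // NOTE: illustrative effect only. These flags decide what tokenization prepends or
+ // appends when the public llama_tokenize() is called with add_special == true; with
+ // tokenizer_add_bos set:
+ //
+ //   llama_tokenize(model, text, len, out, cap, /*add_special=*/true,  ...) -> [BOS, ...]
+ //   llama_tokenize(model, text, len, out, cap, /*add_special=*/false, ...) -> [...]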
1540
+
1541
+ // auto-detect special tokens by text
1542
+ // TODO: convert scripts should provide these tokens through the KV metadata LLM_KV_TOKENIZER_...
1543
+ // for now, we apply this workaround to find the tokens based on their text
1544
+
1545
+ for (const auto & t : vocab.token_to_id) {
1546
+ // find EOT token: "<|eot_id|>", "<|im_end|>", "<end_of_turn>", etc.
1547
+ if (vocab.special_eot_id == LLAMA_TOKEN_NULL) {
1548
+ if (false
1549
+ || t.first == "<|eot_id|>"
1550
+ || t.first == "<|im_end|>"
1551
+ || t.first == "<|end|>"
1552
+ || t.first == "<end_of_turn>"
1553
+ || t.first == "<|endoftext|>"
1554
+ || t.first == "<EOT>"
1555
+ || t.first == "<|end▁of▁sentence|>" // DeepSeek
1556
+ ) {
1557
+ vocab.special_eot_id = t.second;
1558
+ if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
1559
+ LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
1560
+ __func__, t.second, t.first.c_str());
1561
+ vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
1562
+ }
1563
+ }
1564
+ }
1565
+
1566
+ // find EOM token: "<|eom_id|>"
1567
+ if (vocab.special_eom_id == LLAMA_TOKEN_NULL) {
1568
+ if (false
1569
+ || t.first == "<|eom_id|>"
1570
+ ) {
1571
+ vocab.special_eom_id = t.second;
1572
+ if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
1573
+ LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
1574
+ __func__, t.second, t.first.c_str());
1575
+ vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
1576
+ }
1577
+ }
1578
+ }
1579
+
1580
+ // find FIM_PRE token: "<|fim_prefix|>", "<fim-prefix>", "<PRE>", etc.
1581
+ if (vocab.special_fim_pre_id == LLAMA_TOKEN_NULL) {
1582
+ if (false
1583
+ || t.first == "<|fim_prefix|>" // Qwen
1584
+ || t.first == "<fim-prefix>"
1585
+ || t.first == "<|fim▁begin|>" // DeepSeek
1586
+ || t.first == "<PRE>"
1587
+ ) {
1588
+ vocab.special_fim_pre_id = t.second;
1589
+ if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
1590
+ LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
1591
+ __func__, t.second, t.first.c_str());
1592
+ vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
1593
+ }
1594
+ }
1595
+ }
1596
+
1597
+ // find FIM_SUF token: "<|fim_suffix|>", "<fim-suffix>", "<SUF>", etc.
1598
+ if (vocab.special_fim_suf_id == LLAMA_TOKEN_NULL) {
1599
+ if (false
1600
+ || t.first == "<|fim_suffix|>" // Qwen
1601
+ || t.first == "<fim-suffix>"
1602
+ || t.first == "<|fim▁hole|>" // DeepSeek
1603
+ || t.first == "<SUF>"
1604
+ ) {
1605
+ vocab.special_fim_suf_id = t.second;
1606
+ if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
1607
+ LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
1608
+ __func__, t.second, t.first.c_str());
1609
+ vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
1610
+ }
1611
+ }
1612
+ }
1613
+
1614
+ // find FIM_MID token: "<|fim_middle|>", "<fim-middle>", "<MID>", etc.
1615
+ if (vocab.special_fim_mid_id == LLAMA_TOKEN_NULL) {
1616
+ if (false
1617
+ || t.first == "<|fim_middle|>" // Qwen
1618
+ || t.first == "<fim-middle>"
1619
+ || t.first == "<|fim▁end|>" // DeepSeek
1620
+ || t.first == "<MID>"
1621
+ ) {
1622
+ vocab.special_fim_mid_id = t.second;
1623
+ if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
1624
+ LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
1625
+ __func__, t.second, t.first.c_str());
1626
+ vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
1627
+ }
1628
+ }
1629
+ }
1630
+
1631
+ // find FIM_PAD token: "<|fim_pad|>", "<fim-pad>", "<PAD>", etc.
1632
+ if (vocab.special_fim_pad_id == LLAMA_TOKEN_NULL) {
1633
+ if (false
1634
+ || t.first == "<|fim_pad|>" // Qwen
1635
+ || t.first == "<fim-pad>"
1636
+ || t.first == "<PAD>"
1637
+ ) {
1638
+ vocab.special_fim_pad_id = t.second;
1639
+ if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
1640
+ LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
1641
+ __func__, t.second, t.first.c_str());
1642
+ vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
1643
+ }
1644
+ }
1645
+ }
1646
+
1647
+ // find FIM_REP token: "<|fim_repo|>", "<fim-repo>", "<REP>", etc.
1648
+ if (vocab.special_fim_rep_id == LLAMA_TOKEN_NULL) {
1649
+ if (false
1650
+ || t.first == "<|fim_repo|>" // Qwen
1651
+ || t.first == "<|repo_name|>"
1652
+ || t.first == "<fim-repo>"
1653
+ || t.first == "<REPO>"
1654
+ ) {
1655
+ vocab.special_fim_rep_id = t.second;
1656
+ if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
1657
+ LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
1658
+ __func__, t.second, t.first.c_str());
1659
+ vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
1660
+ }
1661
+ }
1662
+ }
1663
+
1664
+ // find FIM_SEP token: "<|file_sep|>"
1665
+ if (vocab.special_fim_sep_id == LLAMA_TOKEN_NULL) {
1666
+ if (false
1667
+ || t.first == "<|file_sep|>" // Qwen
1668
+ ) {
1669
+ vocab.special_fim_sep_id = t.second;
1670
+ if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
1671
+ LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
1672
+ __func__, t.second, t.first.c_str());
1673
+ vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
1674
+ }
1675
+ }
1676
+ }
1677
+ }
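+
+ // NOTE: sketch of the usual consumer, not part of this file. The FIM ids detected
+ // above are typically assembled into an infill prompt in prefix-suffix-middle order,
+ // and generation fills in the middle:
+ //
+ //   tokens = [FIM_PRE] + tokenize(prefix) + [FIM_SUF] + tokenize(suffix) + [FIM_MID]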
1678
+
1679
+ // maintain a list of tokens that cause end-of-generation
1680
+ // this is currently determined based on the token text, which is obviously not ideal
1681
+ // ref: https://github.com/ggerganov/llama.cpp/issues/9606
1682
+ vocab.special_eog_ids.clear();
1683
+
1684
+ if (vocab.special_fim_pad_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_pad_id) == 0) {
1685
+ vocab.special_eog_ids.insert(vocab.special_fim_pad_id);
1686
+ }
1687
+
1688
+ if (vocab.special_fim_rep_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_rep_id) == 0) {
1689
+ vocab.special_eog_ids.insert(vocab.special_fim_rep_id);
1690
+ }
1691
+
1692
+ if (vocab.special_fim_sep_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_sep_id) == 0) {
1693
+ vocab.special_eog_ids.insert(vocab.special_fim_sep_id);
1694
+ }
1695
+
1696
+ for (const auto & t : vocab.token_to_id) {
1697
+ if (false
1698
+ || t.first == "<|eot_id|>"
1699
+ || t.first == "<|im_end|>"
1700
+ || t.first == "<|end|>"
1701
+ || t.first == "<end_of_turn>"
1702
+ || t.first == "<|endoftext|>"
1703
+ || t.first == "<|eom_id|>"
1704
+ || t.first == "<EOT>"
1705
+ ) {
1706
+ vocab.special_eog_ids.insert(t.second);
1707
+ if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
1708
+ LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
1709
+ __func__, t.second, t.first.c_str());
1710
+ vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
1711
+ }
1712
+ } else {
1713
+ // token is control, but not marked as EOG -> print a debug log
1714
+ if (vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL && vocab.special_eog_ids.count(t.second) == 0) {
1715
+ LLAMA_LOG_DEBUG("%s: control token: %6d '%s' is not marked as EOG\n",
1716
+ __func__, t.second, t.first.c_str());
1717
+ }
1718
+ }
1719
+ }
1720
+
1721
+ // sanity checks
1722
+ if (vocab.special_eos_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eos_id) == 0) {
1723
+ vocab.special_eog_ids.insert(vocab.special_eos_id);
1724
+ LLAMA_LOG_WARN("%s: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
1725
+ }
1726
+
1727
+ if (vocab.special_eot_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eot_id) == 0) {
1728
+ vocab.special_eog_ids.insert(vocab.special_eot_id);
1729
+ LLAMA_LOG_WARN("%s: special_eot_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
1730
+ }
1731
+
1732
+ if (vocab.special_eom_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eom_id) == 0) {
1733
+ vocab.special_eog_ids.insert(vocab.special_eom_id);
1734
+ LLAMA_LOG_WARN("%s: special_eom_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
1735
+ }
1736
+ }
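+
+ // NOTE: minimal consumer sketch, assuming the llama_token_is_eog() helper from llama.h;
+ // a generation loop can treat the resulting set uniformly:
+ //
+ //   if (llama_token_is_eog(model, tok)) break;  // true for any id in special_eog_ids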
1737
+
1738
+ // build special tokens cache
1739
+ {
1740
+ for (llama_vocab::id id = 0; id < (llama_vocab::id)n_vocab; ++id) {
1741
+ if (vocab.id_to_token[id].attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_USER_DEFINED | LLAMA_TOKEN_ATTR_UNKNOWN)) {
1742
+ vocab.cache_special_tokens.push_back(id);
1743
+ }
1744
+ }
1745
+
1746
+ std::sort(vocab.cache_special_tokens.begin(), vocab.cache_special_tokens.end(),
1747
+ [&] (const llama_vocab::id a, const llama_vocab::id b) {
1748
+ return vocab.id_to_token[a].text.size() > vocab.id_to_token[b].text.size();
1749
+ }
1750
+ );
1751
+
1752
+ LLAMA_LOG_INFO("%s: special tokens cache size = %u\n", __func__, (uint32_t)vocab.cache_special_tokens.size());
1753
+ }
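+
+ // NOTE: the longest-first ordering matters because special-token matching scans the
+ // raw text greedily: a special token whose text contains another special token as a
+ // substring must be tried first, or it could never match as a whole.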
1754
+
1755
+ // build token to piece cache
1756
+ {
1757
+ size_t size_cache = 0;
1758
+
1759
+ std::vector<llama_vocab::token> cache_token_to_piece(n_vocab);
1760
+
1761
+ for (uint32_t id = 0; id < n_vocab; ++id) {
1762
+ cache_token_to_piece[id] = llama_token_to_piece(&model, id, true);
1763
+
1764
+ size_cache += cache_token_to_piece[id].size();
1765
+ }
1766
+
1767
+ std::swap(vocab.cache_token_to_piece, cache_token_to_piece);
1768
+
1769
+ LLAMA_LOG_INFO("%s: token to piece cache size = %.4f MB\n", __func__, size_cache / 1024.0 / 1024.0);
1770
+ }
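+
+ // NOTE: minimal sketch of the public counterpart, assuming the llama_token_to_piece()
+ // overload declared in llama.h (buffer size is an arbitrary example):
+ //
+ //   char buf[128];
+ //   const int32_t n = llama_token_to_piece(model, id, buf, sizeof(buf), 0, /*special=*/true);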
1771
+
1772
+ // Handle per token attributes
1773
+ //NOTE: Each model customizes per token attributes.
1774
+ //NOTE: Per token attributes are missing from the GGUF file.
1775
+ //TODO: Extract attributes from GGUF file.
1776
+ {
1777
+ auto _contains_any = [] (const std::string &str, const std::vector<std::string> &substrs) -> bool {
1778
+ for (const auto & substr : substrs) {
1779
+ if (str.find(substr) != std::string::npos) {
1780
+ return true;
1781
+ }
1782
+ }
1783
+ return false;
1784
+ };
1785
+
1786
+ auto _set_tokenid_attr = [&] (const llama_vocab::id id, llama_token_attr attr, bool value) {
1787
+ uint32_t current = vocab.id_to_token.at(id).attr;
1788
+ current = value ? (current | attr) : (current & ~attr);
1789
+ vocab.id_to_token[id].attr = (llama_token_attr) current;
1790
+ };
1791
+
1792
+ auto _set_token_attr = [&] (const std::string & token, llama_token_attr attr, bool value) {
1793
+ _set_tokenid_attr(vocab.token_to_id.at(token), attr, value);
1794
+ };
1795
+
1796
+ std::string model_name;
1797
+ std::string tokenizer_pre;
1798
+
1799
+ ml.get_key(LLM_KV_GENERAL_NAME, model_name, false);
1800
+ ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false);
1801
+
1802
+ // model name to lowercase
1803
+ std::transform(model_name.begin(), model_name.end(), model_name.begin(),
1804
+ [] (const std::string::value_type x) {
1805
+ return std::tolower(static_cast<unsigned char>(x)); // avoid UB for negative char values
1806
+ }
1807
+ );
1808
+
1809
+ // set attributes by model/tokenizer name
1810
+ if (_contains_any(tokenizer_pre, {"jina-v2-de", "jina-v2-es", "jina-v2-code"})) {
1811
+ _set_token_attr("<mask>", LLAMA_TOKEN_ATTR_LSTRIP, true);
1812
+ } else if (_contains_any(model_name, {"phi-3", "phi3"})) {
1813
+ for (auto id : vocab.cache_special_tokens) {
1814
+ _set_tokenid_attr(id, LLAMA_TOKEN_ATTR_RSTRIP, true);
1815
+ }
1816
+ for (auto token : {"</s>"}) {
1817
+ _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, true);
1818
+ }
1819
+ for (auto token : {"<unk>", "<s>", "<|endoftext|>"}) {
1820
+ _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, false);
1821
+ }
1822
+ }
1823
+ }
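+
+ // NOTE: illustrative semantics. LLAMA_TOKEN_ATTR_RSTRIP makes the tokenizer consume
+ // whitespace to the right of a matched special token, so for the phi-3 case above a
+ // prompt such as "<|user|>\nHello" partitions as [<|user|>]["Hello"], absorbing the '\n'.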
1824
+ }
1825
+
1826
+ void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
1827
+ const auto & hparams = model.hparams;
1828
+ const auto & vocab = model.vocab;
1829
+
1830
+ const char * rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);
1831
+
1832
+ auto print_f = [](const std::function<uint32_t(uint32_t)> & f, uint32_t n) {
1833
+ bool is_var = false;
1834
+
1835
+ std::vector<uint32_t> v;
1836
+ for (uint32_t i = 0; i < n; ++i) {
1837
+ v.push_back(f(i));
1838
+ if (v[i] != v[0]) {
1839
+ is_var = true;
1840
+ }
1841
+ }
1842
+
1843
+ std::stringstream ss;
1844
+
1845
+ if (is_var) {
1846
+ ss << "[";
1847
+ for (uint32_t i = 0; i < n; ++i) {
1848
+ ss << v[i];
1849
+ if (i < n - 1) {
1850
+ ss << ", ";
1851
+ }
1852
+ }
1853
+ ss << "]";
1854
+ } else {
1855
+ ss << v[0];
1856
+ }
1857
+
1858
+ return ss.str();
1859
+ };
1860
+
1861
+ // hparams
1862
+ LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml.fver));
1863
+ LLAMA_LOG_INFO("%s: arch = %s\n", __func__, llm_arch_name(model.arch));
1864
+ LLAMA_LOG_INFO("%s: vocab type = %s\n", __func__, llama_model_vocab_type_name(vocab.type));
1865
+ LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
1866
+ LLAMA_LOG_INFO("%s: n_merges = %u\n", __func__, (int) vocab.bpe_ranks.size());
1867
+ LLAMA_LOG_INFO("%s: vocab_only = %d\n", __func__, hparams.vocab_only);
1868
+
1869
+ if (!hparams.vocab_only) {
1870
+ LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
1871
+ LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
1872
+ LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
1873
+ LLAMA_LOG_INFO("%s: n_head = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_head(il); }, hparams.n_layer).c_str());
1874
+ LLAMA_LOG_INFO("%s: n_head_kv = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_head_kv(il); }, hparams.n_layer).c_str());
1875
+ LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot);
1876
+ LLAMA_LOG_INFO("%s: n_swa = %u\n", __func__, hparams.n_swa);
1877
+ LLAMA_LOG_INFO("%s: n_embd_head_k = %u\n", __func__, hparams.n_embd_head_k);
1878
+ LLAMA_LOG_INFO("%s: n_embd_head_v = %u\n", __func__, hparams.n_embd_head_v);
1879
+ LLAMA_LOG_INFO("%s: n_gqa = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_gqa(il); }, hparams.n_layer).c_str());
1880
+ LLAMA_LOG_INFO("%s: n_embd_k_gqa = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_embd_k_gqa(il); }, hparams.n_layer).c_str());
1881
+ LLAMA_LOG_INFO("%s: n_embd_v_gqa = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_embd_v_gqa(il); }, hparams.n_layer).c_str());
1882
+ LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_eps);
1883
+ LLAMA_LOG_INFO("%s: f_norm_rms_eps = %.1e\n", __func__, hparams.f_norm_rms_eps);
1884
+ LLAMA_LOG_INFO("%s: f_clamp_kqv = %.1e\n", __func__, hparams.f_clamp_kqv);
1885
+ LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n", __func__, hparams.f_max_alibi_bias);
1886
+ LLAMA_LOG_INFO("%s: f_logit_scale = %.1e\n", __func__, hparams.f_logit_scale);
1887
+ LLAMA_LOG_INFO("%s: n_ff = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_ff(il); }, hparams.n_layer).c_str());
1888
+ LLAMA_LOG_INFO("%s: n_expert = %u\n", __func__, hparams.n_expert);
1889
+ LLAMA_LOG_INFO("%s: n_expert_used = %u\n", __func__, hparams.n_expert_used);
1890
+ LLAMA_LOG_INFO("%s: causal attn = %d\n", __func__, hparams.causal_attn);
1891
+ LLAMA_LOG_INFO("%s: pooling type = %d\n", __func__, hparams.pooling_type);
1892
+ LLAMA_LOG_INFO("%s: rope type = %d\n", __func__, hparams.rope_type);
1893
+ LLAMA_LOG_INFO("%s: rope scaling = %s\n", __func__, rope_scaling_type);
1894
+ LLAMA_LOG_INFO("%s: freq_base_train = %.1f\n", __func__, hparams.rope_freq_base_train);
1895
+ LLAMA_LOG_INFO("%s: freq_scale_train = %g\n", __func__, hparams.rope_freq_scale_train);
1896
+ LLAMA_LOG_INFO("%s: n_ctx_orig_yarn = %u\n", __func__, hparams.n_ctx_orig_yarn);
1897
+ LLAMA_LOG_INFO("%s: rope_finetuned = %s\n", __func__, hparams.rope_finetuned ? "yes" : "unknown");
1898
+ LLAMA_LOG_INFO("%s: ssm_d_conv = %u\n", __func__, hparams.ssm_d_conv);
1899
+ LLAMA_LOG_INFO("%s: ssm_d_inner = %u\n", __func__, hparams.ssm_d_inner);
1900
+ LLAMA_LOG_INFO("%s: ssm_d_state = %u\n", __func__, hparams.ssm_d_state);
1901
+ LLAMA_LOG_INFO("%s: ssm_dt_rank = %u\n", __func__, hparams.ssm_dt_rank);
1902
+ LLAMA_LOG_INFO("%s: ssm_dt_b_c_rms = %d\n", __func__, hparams.ssm_dt_b_c_rms);
1903
+ }
1904
+
1905
+ LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model).c_str());
1906
+ LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model).c_str());
1907
+ if (ml.n_elements >= 1e12) {
1908
+ LLAMA_LOG_INFO("%s: model params = %.2f T\n", __func__, ml.n_elements*1e-12);
1909
+ } else if (ml.n_elements >= 1e9) {
1910
+ LLAMA_LOG_INFO("%s: model params = %.2f B\n", __func__, ml.n_elements*1e-9);
1911
+ } else if (ml.n_elements >= 1e6) {
1912
+ LLAMA_LOG_INFO("%s: model params = %.2f M\n", __func__, ml.n_elements*1e-6);
1913
+ } else {
1914
+ LLAMA_LOG_INFO("%s: model params = %.2f K\n", __func__, ml.n_elements*1e-3);
1915
+ }
1916
+ if (ml.n_bytes < GiB) {
1917
+ LLAMA_LOG_INFO("%s: model size = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
1918
+ } else {
1919
+ LLAMA_LOG_INFO("%s: model size = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
1920
+ }
1921
+
1922
+ // general kv
1923
+ LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, model.name.c_str());
1924
+
1925
+ // special tokens
1926
+ if (vocab.special_bos_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); }
1927
+ if (vocab.special_eos_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); }
1928
+ if (vocab.special_eot_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: EOT token = %d '%s'\n", __func__, vocab.special_eot_id, vocab.id_to_token[vocab.special_eot_id].text.c_str() ); }
1929
+ if (vocab.special_eom_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: EOM token = %d '%s'\n", __func__, vocab.special_eom_id, vocab.id_to_token[vocab.special_eom_id].text.c_str() ); }
1930
+ if (vocab.special_unk_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); }
1931
+ if (vocab.special_sep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); }
1932
+ if (vocab.special_pad_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); }
1933
+ if (vocab.special_cls_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: CLS token = %d '%s'\n", __func__, vocab.special_cls_id, vocab.id_to_token[vocab.special_cls_id].text.c_str() ); }
1934
+ if (vocab.special_mask_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: MASK token = %d '%s'\n", __func__, vocab.special_mask_id, vocab.id_to_token[vocab.special_mask_id].text.c_str() ); }
1935
+
1936
+ if (vocab.linefeed_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
1937
+
1938
+ if (vocab.special_fim_pre_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PRE token = %d '%s'\n", __func__, vocab.special_fim_pre_id, vocab.id_to_token[vocab.special_fim_pre_id].text.c_str() ); }
1939
+ if (vocab.special_fim_suf_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SUF token = %d '%s'\n", __func__, vocab.special_fim_suf_id, vocab.id_to_token[vocab.special_fim_suf_id].text.c_str() ); }
1940
+ if (vocab.special_fim_mid_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM MID token = %d '%s'\n", __func__, vocab.special_fim_mid_id, vocab.id_to_token[vocab.special_fim_mid_id].text.c_str() ); }
1941
+ if (vocab.special_fim_pad_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PAD token = %d '%s'\n", __func__, vocab.special_fim_pad_id, vocab.id_to_token[vocab.special_fim_pad_id].text.c_str() ); }
1942
+ if (vocab.special_fim_rep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM REP token = %d '%s'\n", __func__, vocab.special_fim_rep_id, vocab.id_to_token[vocab.special_fim_rep_id].text.c_str() ); }
1943
+ if (vocab.special_fim_sep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SEP token = %d '%s'\n", __func__, vocab.special_fim_sep_id, vocab.id_to_token[vocab.special_fim_sep_id].text.c_str() ); }
1944
+
1945
+ for (const auto & id : vocab.special_eog_ids) {
1946
+ LLAMA_LOG_INFO( "%s: EOG token = %d '%s'\n", __func__, id, vocab.id_to_token[id].text.c_str() );
1947
+ }
1948
+
1949
+ LLAMA_LOG_INFO("%s: max token length = %d\n", __func__, vocab.max_token_len);
1950
+
1951
+ if (model.arch == LLM_ARCH_DEEPSEEK) {
1952
+ LLAMA_LOG_INFO("%s: n_layer_dense_lead = %d\n", __func__, hparams.n_layer_dense_lead);
1953
+ LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
1954
+ LLAMA_LOG_INFO("%s: n_expert_shared = %d\n", __func__, hparams.n_expert_shared);
1955
+ LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n", __func__, hparams.expert_weights_scale);
1956
+ }
1957
+
1958
+ if (model.arch == LLM_ARCH_DEEPSEEK2) {
1959
+ LLAMA_LOG_INFO("%s: n_layer_dense_lead = %d\n", __func__, hparams.n_layer_dense_lead);
1960
+ LLAMA_LOG_INFO("%s: n_lora_q = %d\n", __func__, hparams.n_lora_q);
1961
+ LLAMA_LOG_INFO("%s: n_lora_kv = %d\n", __func__, hparams.n_lora_kv);
1962
+ LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
1963
+ LLAMA_LOG_INFO("%s: n_expert_shared = %d\n", __func__, hparams.n_expert_shared);
1964
+ LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n", __func__, hparams.expert_weights_scale);
1965
+ LLAMA_LOG_INFO("%s: expert_weights_norm = %d\n", __func__, hparams.expert_weights_norm);
1966
+ LLAMA_LOG_INFO("%s: expert_gating_func = %s\n", __func__, llama_expert_gating_func_name((enum llama_expert_gating_func_type) hparams.expert_gating_func));
1967
+ LLAMA_LOG_INFO("%s: rope_yarn_log_mul = %.4f\n", __func__, hparams.rope_yarn_log_mul);
1968
+ }
1969
+
1970
+ if (model.arch == LLM_ARCH_QWEN2MOE) {
1971
+ LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
1972
+ LLAMA_LOG_INFO("%s: n_ff_shexp = %d\n", __func__, hparams.n_ff_shexp);
1973
+ }
1974
+
1975
+ if (model.arch == LLM_ARCH_MINICPM || model.arch == LLM_ARCH_GRANITE || model.arch == LLM_ARCH_GRANITE_MOE) {
1976
+ LLAMA_LOG_INFO("%s: f_embedding_scale = %f\n", __func__, hparams.f_embedding_scale);
1977
+ LLAMA_LOG_INFO("%s: f_residual_scale = %f\n", __func__, hparams.f_residual_scale);
1978
+ LLAMA_LOG_INFO("%s: f_attention_scale = %f\n", __func__, hparams.f_attention_scale);
1979
+ }
1980
+ }
1981
+
1982
+ //
1983
+ // interface implementation
1984
+ //
1985
+
1986
+ struct llama_model_params llama_model_default_params() {
1987
+ struct llama_model_params result = {
1988
+ /*.devices =*/ nullptr,
1989
+ /*.n_gpu_layers =*/ 0,
1990
+ /*.split_mode =*/ LLAMA_SPLIT_MODE_LAYER,
1991
+ /*.main_gpu =*/ 0,
1992
+ /*.tensor_split =*/ nullptr,
1993
+ /*.rpc_servers =*/ nullptr,
1994
+ /*.progress_callback =*/ nullptr,
1995
+ /*.progress_callback_user_data =*/ nullptr,
1996
+ /*.kv_overrides =*/ nullptr,
1997
+ /*.vocab_only =*/ false,
1998
+ /*.use_mmap =*/ true,
1999
+ /*.use_mlock =*/ false,
2000
+ /*.check_tensors =*/ false,
2001
+ };
2002
+
2003
+ #ifdef LM_GGML_USE_METAL
2004
+ // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
2005
+ result.n_gpu_layers = 999;
2006
+ #endif
2007
+
2008
+ return result;
2009
+ }
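+
+ // NOTE: minimal usage sketch; the model path is a placeholder:
+ //
+ //   struct llama_model_params mp = llama_model_default_params();
+ //   mp.n_gpu_layers = 32;  // example: partial GPU offload
+ //   struct llama_model * m = llama_load_model_from_file("model.gguf", mp);
+ //   // ... use the model ...
+ //   llama_model_free(m);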
2010
+
2011
+ void llama_free_model(struct llama_model * model) {
2012
+ llama_model_free(model);
2013
+ }
2014
+
2015
+ void llama_model_free(struct llama_model * model) {
2016
+ delete model;
2017
+ }
2018
+
2019
+ enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
2020
+ return model->vocab.type;
2021
+ }
2022
+
2023
+ int32_t llama_n_vocab(const struct llama_model * model) {
2024
+ return model->hparams.n_vocab;
2025
+ }
2026
+
2027
+ int32_t llama_n_ctx_train(const struct llama_model * model) {
2028
+ return model->hparams.n_ctx_train;
2029
+ }
2030
+
2031
+ int32_t llama_n_embd(const struct llama_model * model) {
2032
+ return model->hparams.n_embd;
2033
+ }
2034
+
2035
+ int32_t llama_n_layer(const struct llama_model * model) {
2036
+ return model->hparams.n_layer;
2037
+ }
2038
+
2039
+ int32_t llama_n_head(const struct llama_model * model) {
2040
+ return model->hparams.n_head();
2041
+ }
2042
+
2043
+ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
2044
+ switch (model->arch) {
2045
+ // these models do not use RoPE
2046
+ case LLM_ARCH_GPT2:
2047
+ case LLM_ARCH_GPTJ:
2048
+ case LLM_ARCH_MPT:
2049
+ case LLM_ARCH_REFACT:
2050
+ case LLM_ARCH_BLOOM:
2051
+ case LLM_ARCH_MAMBA:
2052
+ case LLM_ARCH_JINA_BERT_V2:
2053
+ case LLM_ARCH_T5:
2054
+ case LLM_ARCH_T5ENCODER:
2055
+ case LLM_ARCH_JAIS:
2056
+ case LLM_ARCH_RWKV6:
2057
+ case LLM_ARCH_WAVTOKENIZER_DEC:
2058
+ return LLAMA_ROPE_TYPE_NONE;
2059
+
2060
+ // use what we call a normal RoPE, operating on pairs of consecutive head values
2061
+ case LLM_ARCH_LLAMA:
2062
+ case LLM_ARCH_DECI:
2063
+ case LLM_ARCH_BAICHUAN:
2064
+ case LLM_ARCH_STARCODER:
2065
+ case LLM_ARCH_PLAMO:
2066
+ case LLM_ARCH_ORION:
2067
+ case LLM_ARCH_INTERNLM2:
2068
+ case LLM_ARCH_MINICPM:
2069
+ case LLM_ARCH_XVERSE:
2070
+ case LLM_ARCH_COMMAND_R:
2071
+ case LLM_ARCH_COHERE2:
2072
+ case LLM_ARCH_OLMO:
2073
+ case LLM_ARCH_ARCTIC:
2074
+ case LLM_ARCH_DEEPSEEK:
2075
+ case LLM_ARCH_DEEPSEEK2:
2076
+ case LLM_ARCH_CHATGLM:
2077
+ case LLM_ARCH_GRANITE:
2078
+ case LLM_ARCH_GRANITE_MOE:
2079
+ case LLM_ARCH_CHAMELEON:
2080
+ return LLAMA_ROPE_TYPE_NORM;
2081
+
2082
+ // the pairs of head values are offset by n_rot/2
2083
+ case LLM_ARCH_FALCON:
2084
+ case LLM_ARCH_GROK:
2085
+ case LLM_ARCH_DBRX:
2086
+ case LLM_ARCH_BERT:
2087
+ case LLM_ARCH_NOMIC_BERT:
2088
+ case LLM_ARCH_STABLELM:
2089
+ case LLM_ARCH_BITNET:
2090
+ case LLM_ARCH_QWEN:
2091
+ case LLM_ARCH_QWEN2:
2092
+ case LLM_ARCH_QWEN2MOE:
2093
+ case LLM_ARCH_OLMO2:
2094
+ case LLM_ARCH_OLMOE:
2095
+ case LLM_ARCH_PHI2:
2096
+ case LLM_ARCH_PHI3:
2097
+ case LLM_ARCH_GEMMA:
2098
+ case LLM_ARCH_GEMMA2:
2099
+ case LLM_ARCH_STARCODER2:
2100
+ case LLM_ARCH_OPENELM:
2101
+ case LLM_ARCH_GPTNEOX:
2102
+ case LLM_ARCH_CODESHELL:
2103
+ case LLM_ARCH_NEMOTRON:
2104
+ case LLM_ARCH_EXAONE:
2105
+ case LLM_ARCH_MINICPM3:
2106
+ return LLAMA_ROPE_TYPE_NEOX;
2107
+
2108
+ case LLM_ARCH_QWEN2VL:
2109
+ return LLAMA_ROPE_TYPE_MROPE;
2110
+
2111
+ // all model arches should be listed explicitly here
2112
+ case LLM_ARCH_UNKNOWN:
2113
+ LM_GGML_ABORT("unknown architecture");
2114
+ }
2115
+
2116
+ return LLAMA_ROPE_TYPE_NONE;
2117
+ }
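+
+ // NOTE: for reference, the two main modes differ only in which head dimensions are
+ // paired for rotation; for rotary width n_rot:
+ //
+ //   NORM: rotate (x[2*i], x[2*i + 1])      for i in [0, n_rot/2)
+ //   NEOX: rotate (x[i],   x[i + n_rot/2])  for i in [0, n_rot/2)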
2118
+
2119
+ float llama_rope_freq_scale_train(const struct llama_model * model) {
2120
+ return model->hparams.rope_freq_scale_train;
2121
+ }
2122
+
2123
+ int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) {
2124
+ const auto & it = model->lm_gguf_kv.find(key);
2125
+ if (it == model->lm_gguf_kv.end()) {
2126
+ if (buf_size > 0) {
2127
+ buf[0] = '\0';
2128
+ }
2129
+ return -1;
2130
+ }
2131
+ return snprintf(buf, buf_size, "%s", it->second.c_str());
2132
+ }
2133
+
2134
+ int32_t llama_model_meta_count(const struct llama_model * model) {
2135
+ return (int)model->lm_gguf_kv.size();
2136
+ }
2137
+
2138
+ int32_t llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
2139
+ if (i < 0 || i >= (int)model->lm_gguf_kv.size()) {
2140
+ if (buf_size > 0) {
2141
+ buf[0] = '\0';
2142
+ }
2143
+ return -1;
2144
+ }
2145
+ auto it = model->lm_gguf_kv.begin();
2146
+ std::advance(it, i);
2147
+ return snprintf(buf, buf_size, "%s", it->first.c_str());
2148
+ }
2149
+
2150
+ int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size) {
2151
+ if (i < 0 || i >= (int)model->lm_gguf_kv.size()) {
2152
+ if (buf_size > 0) {
2153
+ buf[0] = '\0';
2154
+ }
2155
+ return -1;
2156
+ }
2157
+ auto it = model->lm_gguf_kv.begin();
2158
+ std::advance(it, i);
2159
+ return snprintf(buf, buf_size, "%s", it->second.c_str());
2160
+ }
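+
+ // NOTE: illustrative enumeration over the three accessors above (buffer sizes are
+ // arbitrary examples):
+ //
+ //   char k[256], v[256];
+ //   for (int32_t i = 0; i < llama_model_meta_count(model); ++i) {
+ //       llama_model_meta_key_by_index    (model, i, k, sizeof(k));
+ //       llama_model_meta_val_str_by_index(model, i, v, sizeof(v));
+ //   }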
2161
+
2162
+ int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
2163
+ return snprintf(buf, buf_size, "%s %s %s",
2164
+ llama_model_arch_name (*model).c_str(),
2165
+ llama_model_type_name (*model).c_str(),
2166
+ llama_model_ftype_name(*model).c_str());
2167
+ }
2168
+
2169
+ uint64_t llama_model_size(const struct llama_model * model) {
2170
+ return model->n_bytes;
2171
+ }
2172
+
2173
+ uint64_t llama_model_n_params(const struct llama_model * model) {
2174
+ return model->n_elements;
2175
+ }
2176
+
2177
+ bool llama_model_has_encoder(const struct llama_model * model) {
2178
+ switch (model->arch) {
2179
+ case LLM_ARCH_T5: return true;
2180
+ case LLM_ARCH_T5ENCODER: return true;
2181
+ default: return false;
2182
+ }
2183
+ }
2184
+
2185
+ bool llama_model_has_decoder(const struct llama_model * model) {
2186
+ switch (model->arch) {
2187
+ case LLM_ARCH_T5ENCODER: return false;
2188
+ default: return true;
2189
+ }
2190
+ }
2191
+
2192
+ llama_token llama_model_decoder_start_token(const struct llama_model * model) {
2193
+ return model->hparams.dec_start_token_id;
2194
+ }
2195
+
2196
+ bool llama_model_is_recurrent(const struct llama_model * model) {
2197
+ switch (model->arch) {
2198
+ case LLM_ARCH_MAMBA: return true;
2199
+ case LLM_ARCH_RWKV6: return true;
2200
+ default: return false;
2201
+ }
2202
+ }
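+
+ // NOTE: illustrative control flow only; callers pick the entry point from the
+ // capability queries above:
+ //
+ //   if (llama_model_has_encoder(model)) { /* run llama_encode() first   */ }
+ //   if (llama_model_has_decoder(model)) { /* run the llama_decode() loop */ }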