llama-cpp-capacitor 0.0.6 → 0.0.7

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (149)
  1. package/cpp/LICENSE +21 -0
  2. package/cpp/README.md +4 -0
  3. package/cpp/anyascii.c +22223 -0
  4. package/cpp/anyascii.h +42 -0
  5. package/cpp/chat-parser.cpp +393 -0
  6. package/cpp/chat-parser.h +120 -0
  7. package/cpp/chat.cpp +2315 -0
  8. package/cpp/chat.h +221 -0
  9. package/cpp/common.cpp +1619 -0
  10. package/cpp/common.h +744 -0
  11. package/cpp/ggml-alloc.c +1028 -0
  12. package/cpp/ggml-alloc.h +76 -0
  13. package/cpp/ggml-backend-impl.h +255 -0
  14. package/cpp/ggml-backend-reg.cpp +600 -0
  15. package/cpp/ggml-backend.cpp +2118 -0
  16. package/cpp/ggml-backend.h +354 -0
  17. package/cpp/ggml-common.h +1878 -0
  18. package/cpp/ggml-cpp.h +39 -0
  19. package/cpp/ggml-cpu/amx/amx.cpp +221 -0
  20. package/cpp/ggml-cpu/amx/amx.h +8 -0
  21. package/cpp/ggml-cpu/amx/common.h +91 -0
  22. package/cpp/ggml-cpu/amx/mmq.cpp +2512 -0
  23. package/cpp/ggml-cpu/amx/mmq.h +10 -0
  24. package/cpp/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
  25. package/cpp/ggml-cpu/arch/arm/quants.c +3650 -0
  26. package/cpp/ggml-cpu/arch/arm/repack.cpp +1891 -0
  27. package/cpp/ggml-cpu/arch/x86/cpu-feats.cpp +327 -0
  28. package/cpp/ggml-cpu/arch/x86/quants.c +3820 -0
  29. package/cpp/ggml-cpu/arch/x86/repack.cpp +6307 -0
  30. package/cpp/ggml-cpu/arch-fallback.h +215 -0
  31. package/cpp/ggml-cpu/binary-ops.cpp +158 -0
  32. package/cpp/ggml-cpu/binary-ops.h +16 -0
  33. package/cpp/ggml-cpu/common.h +73 -0
  34. package/cpp/ggml-cpu/ggml-cpu-impl.h +525 -0
  35. package/cpp/ggml-cpu/ggml-cpu.c +3578 -0
  36. package/cpp/ggml-cpu/ggml-cpu.cpp +672 -0
  37. package/cpp/ggml-cpu/ops.cpp +10587 -0
  38. package/cpp/ggml-cpu/ops.h +114 -0
  39. package/cpp/ggml-cpu/quants.c +1193 -0
  40. package/cpp/ggml-cpu/quants.h +97 -0
  41. package/cpp/ggml-cpu/repack.cpp +1982 -0
  42. package/cpp/ggml-cpu/repack.h +120 -0
  43. package/cpp/ggml-cpu/simd-mappings.h +1184 -0
  44. package/cpp/ggml-cpu/traits.cpp +36 -0
  45. package/cpp/ggml-cpu/traits.h +38 -0
  46. package/cpp/ggml-cpu/unary-ops.cpp +186 -0
  47. package/cpp/ggml-cpu/unary-ops.h +28 -0
  48. package/cpp/ggml-cpu/vec.cpp +348 -0
  49. package/cpp/ggml-cpu/vec.h +1121 -0
  50. package/cpp/ggml-cpu.h +145 -0
  51. package/cpp/ggml-impl.h +622 -0
  52. package/cpp/ggml-metal-impl.h +688 -0
  53. package/cpp/ggml-metal.h +66 -0
  54. package/cpp/ggml-metal.m +6833 -0
  55. package/cpp/ggml-opt.cpp +1093 -0
  56. package/cpp/ggml-opt.h +256 -0
  57. package/cpp/ggml-quants.c +5324 -0
  58. package/cpp/ggml-quants.h +106 -0
  59. package/cpp/ggml-threading.cpp +12 -0
  60. package/cpp/ggml-threading.h +14 -0
  61. package/cpp/ggml.c +7108 -0
  62. package/cpp/ggml.h +2492 -0
  63. package/cpp/gguf.cpp +1358 -0
  64. package/cpp/gguf.h +202 -0
  65. package/cpp/json-partial.cpp +256 -0
  66. package/cpp/json-partial.h +38 -0
  67. package/cpp/json-schema-to-grammar.cpp +985 -0
  68. package/cpp/json-schema-to-grammar.h +21 -0
  69. package/cpp/llama-adapter.cpp +388 -0
  70. package/cpp/llama-adapter.h +76 -0
  71. package/cpp/llama-arch.cpp +2355 -0
  72. package/cpp/llama-arch.h +499 -0
  73. package/cpp/llama-batch.cpp +875 -0
  74. package/cpp/llama-batch.h +160 -0
  75. package/cpp/llama-chat.cpp +783 -0
  76. package/cpp/llama-chat.h +65 -0
  77. package/cpp/llama-context.cpp +2748 -0
  78. package/cpp/llama-context.h +306 -0
  79. package/cpp/llama-cparams.cpp +5 -0
  80. package/cpp/llama-cparams.h +41 -0
  81. package/cpp/llama-cpp.h +30 -0
  82. package/cpp/llama-grammar.cpp +1229 -0
  83. package/cpp/llama-grammar.h +173 -0
  84. package/cpp/llama-graph.cpp +1891 -0
  85. package/cpp/llama-graph.h +810 -0
  86. package/cpp/llama-hparams.cpp +180 -0
  87. package/cpp/llama-hparams.h +233 -0
  88. package/cpp/llama-impl.cpp +167 -0
  89. package/cpp/llama-impl.h +61 -0
  90. package/cpp/llama-io.cpp +15 -0
  91. package/cpp/llama-io.h +35 -0
  92. package/cpp/llama-kv-cache-iswa.cpp +318 -0
  93. package/cpp/llama-kv-cache-iswa.h +135 -0
  94. package/cpp/llama-kv-cache.cpp +2059 -0
  95. package/cpp/llama-kv-cache.h +374 -0
  96. package/cpp/llama-kv-cells.h +491 -0
  97. package/cpp/llama-memory-hybrid.cpp +258 -0
  98. package/cpp/llama-memory-hybrid.h +137 -0
  99. package/cpp/llama-memory-recurrent.cpp +1146 -0
  100. package/cpp/llama-memory-recurrent.h +179 -0
  101. package/cpp/llama-memory.cpp +59 -0
  102. package/cpp/llama-memory.h +119 -0
  103. package/cpp/llama-mmap.cpp +600 -0
  104. package/cpp/llama-mmap.h +68 -0
  105. package/cpp/llama-model-loader.cpp +1164 -0
  106. package/cpp/llama-model-loader.h +170 -0
  107. package/cpp/llama-model-saver.cpp +282 -0
  108. package/cpp/llama-model-saver.h +37 -0
  109. package/cpp/llama-model.cpp +19042 -0
  110. package/cpp/llama-model.h +491 -0
  111. package/cpp/llama-sampling.cpp +2575 -0
  112. package/cpp/llama-sampling.h +32 -0
  113. package/cpp/llama-vocab.cpp +3792 -0
  114. package/cpp/llama-vocab.h +176 -0
  115. package/cpp/llama.cpp +358 -0
  116. package/cpp/llama.h +1373 -0
  117. package/cpp/log.cpp +427 -0
  118. package/cpp/log.h +103 -0
  119. package/cpp/minja/chat-template.hpp +550 -0
  120. package/cpp/minja/minja.hpp +3009 -0
  121. package/cpp/nlohmann/json.hpp +25526 -0
  122. package/cpp/nlohmann/json_fwd.hpp +187 -0
  123. package/cpp/regex-partial.cpp +204 -0
  124. package/cpp/regex-partial.h +56 -0
  125. package/cpp/rn-completion.cpp +681 -0
  126. package/cpp/rn-completion.h +116 -0
  127. package/cpp/rn-llama.cpp +345 -0
  128. package/cpp/rn-llama.h +149 -0
  129. package/cpp/rn-mtmd.hpp +602 -0
  130. package/cpp/rn-tts.cpp +591 -0
  131. package/cpp/rn-tts.h +59 -0
  132. package/cpp/sampling.cpp +579 -0
  133. package/cpp/sampling.h +107 -0
  134. package/cpp/tools/mtmd/clip-impl.h +473 -0
  135. package/cpp/tools/mtmd/clip.cpp +4322 -0
  136. package/cpp/tools/mtmd/clip.h +106 -0
  137. package/cpp/tools/mtmd/miniaudio/miniaudio.h +93468 -0
  138. package/cpp/tools/mtmd/mtmd-audio.cpp +769 -0
  139. package/cpp/tools/mtmd/mtmd-audio.h +47 -0
  140. package/cpp/tools/mtmd/mtmd-helper.cpp +460 -0
  141. package/cpp/tools/mtmd/mtmd-helper.h +91 -0
  142. package/cpp/tools/mtmd/mtmd.cpp +1066 -0
  143. package/cpp/tools/mtmd/mtmd.h +298 -0
  144. package/cpp/tools/mtmd/stb/stb_image.h +7988 -0
  145. package/cpp/unicode-data.cpp +7034 -0
  146. package/cpp/unicode-data.h +20 -0
  147. package/cpp/unicode.cpp +1061 -0
  148. package/cpp/unicode.h +68 -0
  149. package/package.json +2 -1
package/cpp/llama-model.h (new file)
@@ -0,0 +1,491 @@
+ #pragma once
+
+ #include "llama.h"
+ #include "llama-arch.h"
+ #include "llama-graph.h"
+ #include "llama-hparams.h"
+ #include "llama-memory.h"
+ #include "llama-vocab.h"
+
+ #include <memory>
+ #include <string>
+ #include <unordered_map>
+ #include <vector>
+
+ struct llama_cparams;
+ struct llama_ubatch;
+ struct llama_model_loader;
+
+ // available models
+ enum llm_type {
+     LLM_TYPE_UNKNOWN,
+     LLM_TYPE_14M,
+     LLM_TYPE_17M,
+     LLM_TYPE_22M,
+     LLM_TYPE_33M,
+     LLM_TYPE_60M,
+     LLM_TYPE_70M,
+     LLM_TYPE_80M,
+     LLM_TYPE_109M,
+     LLM_TYPE_137M,
+     LLM_TYPE_160M,
+     LLM_TYPE_190M,
+     LLM_TYPE_220M,
+     LLM_TYPE_250M,
+     LLM_TYPE_256M,
+     LLM_TYPE_270M,
+     LLM_TYPE_335M,
+     LLM_TYPE_350M,
+     LLM_TYPE_410M,
+     LLM_TYPE_450M,
+     LLM_TYPE_475M,
+     LLM_TYPE_537M,
+     LLM_TYPE_700M,
+     LLM_TYPE_770M,
+     LLM_TYPE_780M,
+     LLM_TYPE_0_3B,
+     LLM_TYPE_0_5B,
+     LLM_TYPE_0_6B,
+     LLM_TYPE_1B,
+     LLM_TYPE_1_2B,
+     LLM_TYPE_1_3B,
+     LLM_TYPE_1_4B,
+     LLM_TYPE_1_5B,
+     LLM_TYPE_1_6B,
+     LLM_TYPE_1_7B,
+     LLM_TYPE_1_8B,
+     LLM_TYPE_2B,
+     LLM_TYPE_2_8B,
+     LLM_TYPE_2_9B,
+     LLM_TYPE_3B,
+     LLM_TYPE_4B,
+     LLM_TYPE_6B,
+     LLM_TYPE_6_9B,
+     LLM_TYPE_7B,
+     LLM_TYPE_8B,
+     LLM_TYPE_9B,
+     LLM_TYPE_11B,
+     LLM_TYPE_12B,
+     LLM_TYPE_13B,
+     LLM_TYPE_14B,
+     LLM_TYPE_15B,
+     LLM_TYPE_16B,
+     LLM_TYPE_20B,
+     LLM_TYPE_27B,
+     LLM_TYPE_30B,
+     LLM_TYPE_32B,
+     LLM_TYPE_34B,
+     LLM_TYPE_35B,
+     LLM_TYPE_36B,
+     LLM_TYPE_40B,
+     LLM_TYPE_65B,
+     LLM_TYPE_70B,
+     LLM_TYPE_120B,
+     LLM_TYPE_142B,
+     LLM_TYPE_236B,
+     LLM_TYPE_290B,
+     LLM_TYPE_314B,
+     LLM_TYPE_405B,
+     LLM_TYPE_671B,
+     LLM_TYPE_SMALL,
+     LLM_TYPE_MEDIUM,
+     LLM_TYPE_LARGE,
+     LLM_TYPE_XL,
+     LLM_TYPE_A1_7B,
+     LLM_TYPE_A2_7B,
+     LLM_TYPE_8x7B,
+     LLM_TYPE_8x22B,
+     LLM_TYPE_16x12B,
+     LLM_TYPE_16x3_8B,
+     LLM_TYPE_10B_128x3_66B,
+     LLM_TYPE_57B_A14B,
+     LLM_TYPE_17B_16E, // llama4 Scout
+     LLM_TYPE_17B_128E, // llama4 Maverick
+     LLM_TYPE_A13B,
+     LLM_TYPE_21B_A3B, // Ernie MoE small
+     LLM_TYPE_30B_A3B,
+     LLM_TYPE_106B_A12B, // GLM-4.5-Air
+     LLM_TYPE_235B_A22B,
+     LLM_TYPE_300B_A47B, // Ernie MoE big
+     LLM_TYPE_355B_A32B, // GLM-4.5
+     LLM_TYPE_E2B,
+     LLM_TYPE_E4B,
+ };
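
Naming convention for these values, as the entries and comments above suggest: an underscore stands in for a decimal point (LLM_TYPE_1_5B is a ~1.5B-parameter model); NxM forms such as LLM_TYPE_8x7B denote mixture-of-experts layouts (N experts of roughly M parameters each); and the A-forms give active parameter counts, so LLM_TYPE_30B_A3B reads as ~30B total parameters with ~3B active per token, consistent with the GLM-4.5 and Ernie comments.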
+
+ std::string llama_rope_scaling_type_name(llama_rope_scaling_type rope_scaling_type);
+
+ struct llama_layer_posnet {
+     // resnet
+     struct lm_ggml_tensor * norm1 = nullptr;
+     struct lm_ggml_tensor * norm1_b = nullptr;
+
+     struct lm_ggml_tensor * conv1 = nullptr;
+     struct lm_ggml_tensor * conv1_b = nullptr;
+
+     struct lm_ggml_tensor * norm2 = nullptr;
+     struct lm_ggml_tensor * norm2_b = nullptr;
+
+     struct lm_ggml_tensor * conv2 = nullptr;
+     struct lm_ggml_tensor * conv2_b = nullptr;
+
+     // attention
+     struct lm_ggml_tensor * attn_norm = nullptr;
+     struct lm_ggml_tensor * attn_norm_b = nullptr;
+
+     struct lm_ggml_tensor * attn_q = nullptr;
+     struct lm_ggml_tensor * attn_q_b = nullptr;
+
+     struct lm_ggml_tensor * attn_k = nullptr;
+     struct lm_ggml_tensor * attn_k_b = nullptr;
+
+     struct lm_ggml_tensor * attn_v = nullptr;
+     struct lm_ggml_tensor * attn_v_b = nullptr;
+
+     struct lm_ggml_tensor * attn_o = nullptr;
+     struct lm_ggml_tensor * attn_o_b = nullptr;
+
+     // normalize
+     struct lm_ggml_tensor * norm = nullptr;
+     struct lm_ggml_tensor * norm_b = nullptr;
+ };
+
+ struct llama_layer_convnext {
+     struct lm_ggml_tensor * dw = nullptr;
+     struct lm_ggml_tensor * dw_b = nullptr;
+
+     struct lm_ggml_tensor * norm = nullptr;
+     struct lm_ggml_tensor * norm_b = nullptr;
+
+     struct lm_ggml_tensor * pw1 = nullptr;
+     struct lm_ggml_tensor * pw1_b = nullptr;
+
+     struct lm_ggml_tensor * pw2 = nullptr;
+     struct lm_ggml_tensor * pw2_b = nullptr;
+
+     struct lm_ggml_tensor * gamma = nullptr;
+ };
+
+ struct llama_layer_shortconv {
+     struct lm_ggml_tensor * in_proj = nullptr;
+     struct lm_ggml_tensor * conv = nullptr;
+     struct lm_ggml_tensor * out_proj = nullptr;
+ };
+
+ struct llama_layer_nextn {
+     struct lm_ggml_tensor * eh_proj = nullptr;
+     struct lm_ggml_tensor * embed_tokens = nullptr;
+     struct lm_ggml_tensor * enorm = nullptr;
+     struct lm_ggml_tensor * hnorm = nullptr;
+     struct lm_ggml_tensor * shared_head_head = nullptr;
+     struct lm_ggml_tensor * shared_head_norm = nullptr;
+ };
+
+ struct llama_layer {
+     // normalization
+     struct lm_ggml_tensor * attn_norm = nullptr;
+     struct lm_ggml_tensor * attn_norm_b = nullptr;
+     struct lm_ggml_tensor * attn_norm_2 = nullptr;
+     struct lm_ggml_tensor * attn_norm_2_b = nullptr;
+     struct lm_ggml_tensor * attn_q_norm = nullptr;
+     struct lm_ggml_tensor * attn_q_norm_b = nullptr;
+     struct lm_ggml_tensor * attn_k_norm = nullptr;
+     struct lm_ggml_tensor * attn_k_norm_b = nullptr;
+     struct lm_ggml_tensor * attn_out_norm = nullptr;
+     struct lm_ggml_tensor * attn_out_norm_b = nullptr;
+     struct lm_ggml_tensor * attn_q_a_norm = nullptr;
+     struct lm_ggml_tensor * attn_kv_a_norm = nullptr;
+     struct lm_ggml_tensor * attn_sub_norm = nullptr;
+     struct lm_ggml_tensor * attn_post_norm = nullptr;
+     struct lm_ggml_tensor * ffn_sub_norm = nullptr;
+     struct lm_ggml_tensor * attn_norm_cross = nullptr;
+     struct lm_ggml_tensor * attn_norm_enc = nullptr;
+     struct lm_ggml_tensor * ssm_norm = nullptr;
+     struct lm_ggml_tensor * ssm_dt_norm = nullptr;
+     struct lm_ggml_tensor * ssm_b_norm = nullptr;
+     struct lm_ggml_tensor * ssm_c_norm = nullptr;
+
+     // attention
+     struct lm_ggml_tensor * wq = nullptr;
+     struct lm_ggml_tensor * wk = nullptr;
+     struct lm_ggml_tensor * wv = nullptr;
+     struct lm_ggml_tensor * wo = nullptr;
+     struct lm_ggml_tensor * wqkv = nullptr;
+     struct lm_ggml_tensor * wq_a = nullptr;
+     struct lm_ggml_tensor * wq_b = nullptr;
+     struct lm_ggml_tensor * wkv_a_mqa = nullptr;
+     struct lm_ggml_tensor * wkv_b = nullptr;
+     struct lm_ggml_tensor * wk_b = nullptr;
+     struct lm_ggml_tensor * wv_b = nullptr;
+     struct lm_ggml_tensor * wq_cross = nullptr;
+     struct lm_ggml_tensor * wk_cross = nullptr;
+     struct lm_ggml_tensor * wv_cross = nullptr;
+     struct lm_ggml_tensor * wo_cross = nullptr;
+     struct lm_ggml_tensor * wq_enc = nullptr;
+     struct lm_ggml_tensor * wk_enc = nullptr;
+     struct lm_ggml_tensor * wv_enc = nullptr;
+     struct lm_ggml_tensor * wo_enc = nullptr;
+
+     // attention bias
+     struct lm_ggml_tensor * bq = nullptr;
+     struct lm_ggml_tensor * bk = nullptr;
+     struct lm_ggml_tensor * bv = nullptr;
+     struct lm_ggml_tensor * bo = nullptr;
+     struct lm_ggml_tensor * bqkv = nullptr;
+
+     // relative position bias
+     struct lm_ggml_tensor * attn_rel_b = nullptr;
+     struct lm_ggml_tensor * attn_rel_b_enc = nullptr;
+     struct lm_ggml_tensor * attn_rel_b_cross = nullptr;
+
+     // normalization
+     struct lm_ggml_tensor * ffn_norm = nullptr;
+     struct lm_ggml_tensor * ffn_norm_b = nullptr;
+     struct lm_ggml_tensor * ffn_post_norm = nullptr;
+     struct lm_ggml_tensor * layer_out_norm = nullptr;
+     struct lm_ggml_tensor * layer_out_norm_b = nullptr;
+     struct lm_ggml_tensor * ffn_norm_exps = nullptr;
+     struct lm_ggml_tensor * ffn_norm_enc = nullptr;
+
+     // ff
+     struct lm_ggml_tensor * ffn_gate = nullptr; // w1
+     struct lm_ggml_tensor * ffn_down = nullptr; // w2
+     struct lm_ggml_tensor * ffn_up = nullptr; // w3
+     struct lm_ggml_tensor * ffn_gate_enc = nullptr;
+     struct lm_ggml_tensor * ffn_down_enc = nullptr;
+     struct lm_ggml_tensor * ffn_up_enc = nullptr;
+
+     // ff MoE
+     struct lm_ggml_tensor * ffn_gate_inp = nullptr;
+     struct lm_ggml_tensor * ffn_gate_exps = nullptr;
+     struct lm_ggml_tensor * ffn_down_exps = nullptr;
+     struct lm_ggml_tensor * ffn_up_exps = nullptr;
+     struct lm_ggml_tensor * ffn_gate_inp_b = nullptr;
+     struct lm_ggml_tensor * ffn_gate_exps_b = nullptr;
+     struct lm_ggml_tensor * ffn_down_exps_b = nullptr;
+     struct lm_ggml_tensor * ffn_up_exps_b = nullptr;
+
+     // ff shared expert (shexp)
+     struct lm_ggml_tensor * ffn_gate_inp_shexp = nullptr;
+     struct lm_ggml_tensor * ffn_gate_shexp = nullptr;
+     struct lm_ggml_tensor * ffn_down_shexp = nullptr;
+     struct lm_ggml_tensor * ffn_up_shexp = nullptr;
+
+     // ff bias
+     struct lm_ggml_tensor * ffn_gate_b = nullptr;
+     struct lm_ggml_tensor * ffn_down_b = nullptr; // b2
+     struct lm_ggml_tensor * ffn_up_b = nullptr; // b3
+     struct lm_ggml_tensor * ffn_act = nullptr;
+     struct lm_ggml_tensor * ffn_exp_probs_b = nullptr;
+
+     // mamba proj
+     struct lm_ggml_tensor * ssm_in = nullptr;
+     struct lm_ggml_tensor * ssm_x = nullptr;
+     struct lm_ggml_tensor * ssm_dt = nullptr;
+     struct lm_ggml_tensor * ssm_out = nullptr;
+
+     // mamba
+     struct lm_ggml_tensor * ssm_conv1d = nullptr;
+     struct lm_ggml_tensor * ssm_a = nullptr;
+     struct lm_ggml_tensor * ssm_d = nullptr;
+
+     // mamba bias
+     struct lm_ggml_tensor * ssm_conv1d_b = nullptr;
+     struct lm_ggml_tensor * ssm_dt_b = nullptr;
+
+     // rwkv
+     struct lm_ggml_tensor * time_mix_w1 = nullptr;
+     struct lm_ggml_tensor * time_mix_w2 = nullptr;
+     struct lm_ggml_tensor * time_mix_lerp_x = nullptr;
+     struct lm_ggml_tensor * time_mix_lerp_w = nullptr;
+     struct lm_ggml_tensor * time_mix_lerp_k = nullptr;
+     struct lm_ggml_tensor * time_mix_lerp_v = nullptr;
+     struct lm_ggml_tensor * time_mix_lerp_r = nullptr;
+     struct lm_ggml_tensor * time_mix_lerp_g = nullptr;
+     struct lm_ggml_tensor * time_mix_lerp_fused = nullptr;
+
+     struct lm_ggml_tensor * time_mix_first = nullptr;
+     struct lm_ggml_tensor * time_mix_decay = nullptr;
+     struct lm_ggml_tensor * time_mix_decay_w1 = nullptr;
+     struct lm_ggml_tensor * time_mix_decay_w2 = nullptr;
+     struct lm_ggml_tensor * time_mix_key = nullptr;
+     struct lm_ggml_tensor * time_mix_key_b = nullptr;
+     struct lm_ggml_tensor * time_mix_value = nullptr;
+     struct lm_ggml_tensor * time_mix_value_b = nullptr;
+     struct lm_ggml_tensor * time_mix_receptance = nullptr;
+     struct lm_ggml_tensor * time_mix_receptance_b = nullptr;
+     struct lm_ggml_tensor * time_mix_gate = nullptr;
+
+     // rwkv7
+     struct lm_ggml_tensor * time_mix_w0 = nullptr;
+     struct lm_ggml_tensor * time_mix_a0 = nullptr;
+     struct lm_ggml_tensor * time_mix_a1 = nullptr;
+     struct lm_ggml_tensor * time_mix_a2 = nullptr;
+     struct lm_ggml_tensor * time_mix_v0 = nullptr;
+     struct lm_ggml_tensor * time_mix_v1 = nullptr;
+     struct lm_ggml_tensor * time_mix_v2 = nullptr;
+     struct lm_ggml_tensor * time_mix_g1 = nullptr;
+     struct lm_ggml_tensor * time_mix_g2 = nullptr;
+     struct lm_ggml_tensor * time_mix_k_k = nullptr;
+     struct lm_ggml_tensor * time_mix_k_a = nullptr;
+     struct lm_ggml_tensor * time_mix_r_k = nullptr;
+
+     struct lm_ggml_tensor * time_mix_ln = nullptr;
+     struct lm_ggml_tensor * time_mix_ln_b = nullptr;
+     struct lm_ggml_tensor * time_mix_output = nullptr;
+
+     struct lm_ggml_tensor * channel_mix_lerp_k = nullptr;
+     struct lm_ggml_tensor * channel_mix_lerp_r = nullptr;
+
+     struct lm_ggml_tensor * channel_mix_key = nullptr;
+     struct lm_ggml_tensor * channel_mix_receptance = nullptr;
+     struct lm_ggml_tensor * channel_mix_value = nullptr;
+
+     // long rope factors
+     struct lm_ggml_tensor * rope_long = nullptr;
+     struct lm_ggml_tensor * rope_short = nullptr;
+     struct lm_ggml_tensor * rope_freqs = nullptr;
+
+     // bitnet scale
+     struct lm_ggml_tensor * wq_scale = nullptr;
+     struct lm_ggml_tensor * wk_scale = nullptr;
+     struct lm_ggml_tensor * wv_scale = nullptr;
+     struct lm_ggml_tensor * wo_scale = nullptr;
+     struct lm_ggml_tensor * ffn_gate_scale = nullptr;
+     struct lm_ggml_tensor * ffn_up_scale = nullptr;
+     struct lm_ggml_tensor * ffn_down_scale = nullptr;
+
+     // altup & laurel
+     struct lm_ggml_tensor * per_layer_inp_gate = nullptr;
+     struct lm_ggml_tensor * per_layer_proj = nullptr;
+     struct lm_ggml_tensor * per_layer_post_norm = nullptr;
+     struct lm_ggml_tensor * altup_correct_coef = nullptr;
+     struct lm_ggml_tensor * altup_correct_scale = nullptr;
+     struct lm_ggml_tensor * altup_predict_coef = nullptr;
+     struct lm_ggml_tensor * altup_router = nullptr;
+     struct lm_ggml_tensor * altup_router_norm = nullptr;
+     struct lm_ggml_tensor * laurel_l = nullptr;
+     struct lm_ggml_tensor * laurel_r = nullptr;
+     struct lm_ggml_tensor * laurel_post_norm = nullptr;
+
+     // openai-moe
+     struct lm_ggml_tensor * attn_sinks = nullptr;
+
+     struct llama_layer_posnet posnet;
+
+     struct llama_layer_convnext convnext;
+
+     struct llama_layer_shortconv shortconv;
+
+     struct llama_layer_nextn nextn;
+ };
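
Among the fields above, the ffn_gate / ffn_down / ffn_up trio (w1 / w2 / w3 in the comments) holds the gated feed-forward weights used by LLaMA-family models. As a toy sketch of the dataflow only (the real computation is assembled as lm_ggml graph ops elsewhere in the package, and individual architectures vary), the common SwiGLU wiring computes out = ffn_down(silu(ffn_gate(x)) * ffn_up(x)):

    #include <cmath>

    // Scalar stand-in for the gated FFN dataflow; the real weights are matrices.
    static float silu(float x) { return x / (1.0f + std::exp(-x)); }

    // w1 = ffn_gate, w2 = ffn_down, w3 = ffn_up (hypothetical scalar weights)
    float ffn_swiglu(float x, float w1, float w2, float w3) {
        return w2 * (silu(w1 * x) * (w3 * x));
    }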
+
+ struct llama_model {
+     llm_type type = LLM_TYPE_UNKNOWN;
+     llm_arch arch = LLM_ARCH_UNKNOWN;
+
+     std::string name = "n/a";
+
+     llama_hparams hparams = {};
+     llama_vocab vocab;
+
+     // for classifier models
+     std::vector<std::string> classifier_labels;
+
+     struct lm_ggml_tensor * tok_embd = nullptr;
+     struct lm_ggml_tensor * type_embd = nullptr;
+     struct lm_ggml_tensor * pos_embd = nullptr;
+     struct lm_ggml_tensor * tok_norm = nullptr;
+     struct lm_ggml_tensor * tok_norm_b = nullptr;
+
+     struct lm_ggml_tensor * output_norm = nullptr;
+     struct lm_ggml_tensor * output_norm_b = nullptr;
+     struct lm_ggml_tensor * output = nullptr;
+     struct lm_ggml_tensor * output_b = nullptr;
+     struct lm_ggml_tensor * output_norm_enc = nullptr;
+
+     // classifier
+     struct lm_ggml_tensor * cls = nullptr;
+     struct lm_ggml_tensor * cls_b = nullptr;
+     struct lm_ggml_tensor * cls_out = nullptr;
+     struct lm_ggml_tensor * cls_out_b = nullptr;
+
+     struct lm_ggml_tensor * conv1d = nullptr;
+     struct lm_ggml_tensor * conv1d_b = nullptr;
+
+     // gemma3n altup
+     struct lm_ggml_tensor * tok_embd_per_layer = nullptr;
+     struct lm_ggml_tensor * altup_proj = nullptr;
+     struct lm_ggml_tensor * altup_unembd_proj = nullptr;
+     struct lm_ggml_tensor * per_layer_model_proj = nullptr;
+     struct lm_ggml_tensor * per_layer_proj_norm = nullptr;
+
+     std::vector<llama_layer> layers;
+
+     llama_model_params params;
+
+     // gguf metadata
+     std::unordered_map<std::string, std::string> lm_gguf_kv;
+
+     // list of devices used in this model
+     std::vector<lm_ggml_backend_dev_t> devices;
+
+     // for quantize-stats only
+     std::vector<std::pair<std::string, struct lm_ggml_tensor *>> tensors_by_name;
+
+     int64_t t_load_us = 0;
+     int64_t t_start_us = 0;
+
+     explicit llama_model(const struct llama_model_params & params);
+     ~llama_model();
+
+     void load_stats (llama_model_loader & ml);
+     void load_arch (llama_model_loader & ml);
+     void load_hparams(llama_model_loader & ml);
+     void load_vocab (llama_model_loader & ml);
+     bool load_tensors(llama_model_loader & ml); // returns false if cancelled by progress_callback
+
+     std::string arch_name() const;
+     std::string type_name() const;
+
+     std::string desc() const;
+
+     size_t size() const;
+     size_t n_tensors() const;
+     size_t n_devices() const;
+
+     // total number of parameters in the model
+     uint64_t n_elements() const;
+
+     void print_info() const;
+
+     lm_ggml_backend_dev_t dev_layer(int il) const;
+     lm_ggml_backend_dev_t dev_output() const;
+
+     lm_ggml_backend_buffer_type_t select_buft(int il) const;
+
+     bool has_tensor_overrides() const;
+
+     const struct lm_ggml_tensor * get_tensor(const char * name) const;
+
+     float get_rope_freq_base (const llama_cparams & cparams, int il) const;
+     float get_rope_freq_scale(const llama_cparams & cparams, int il) const;
+
+     lm_ggml_tensor * get_rope_factors(const llama_cparams & cparams, int il) const;
+
+     // note: can mutate `cparams`
+     // TODO: move this to new llm_arch_model_i interface
+     llama_memory_i * create_memory(const llama_memory_params & params, llama_cparams & cparams) const;
+
+     // TODO: move this to new llm_arch_model_i interface
+     lm_ggml_cgraph * build_graph(const llm_graph_params & params) const;
+
+ private:
+     struct impl;
+     std::unique_ptr<impl> pimpl;
+ };
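
The private tail of llama_model (struct impl; std::unique_ptr<impl> pimpl;) is the pimpl idiom: implementation state lives out of the header, so its layout can change without rebuilding every includer. A minimal self-contained sketch of the same pattern, using a hypothetical widget class that is not part of this package (header and source combined into one listing for brevity):

    #include <memory>

    // --- header part: public surface only; impl is an incomplete type here
    class widget {
    public:
        widget();
        ~widget();                // defined below, where impl is complete
        int value() const;
    private:
        struct impl;              // layout hidden from clients
        std::unique_ptr<impl> pimpl;
    };

    // --- source part: the only place that sees the fields
    struct widget::impl {
        int value = 42;
    };

    widget::widget() : pimpl(std::make_unique<impl>()) {}
    widget::~widget() = default; // unique_ptr<impl> is destroyed where impl is complete
    int widget::value() const { return pimpl->value; }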
+
+ const char * llm_type_name(llm_type type);
+
+ // For internal test use
+ // TODO: remove
+ const std::vector<std::pair<std::string, lm_ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model);
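
For orientation, a sketch of how the loading interface declared above might be driven, based only on the member declarations in this header; the llama_model_loader setup is elided and the real call order lives in the package's llama.cpp sources, so treat this as an approximation rather than the actual flow:

    #include "llama-model.h"

    // Hypothetical driver mirroring the declaration order of llama_model above.
    bool try_load(llama_model_loader & ml, const llama_model_params & params) {
        llama_model model(params);

        model.load_arch   (ml); // resolve llm_arch from file metadata
        model.load_hparams(ml); // fill model.hparams
        model.load_vocab  (ml); // fill model.vocab
        model.load_stats  (ml);

        if (!model.load_tensors(ml)) {
            return false;       // cancelled by the progress callback
        }

        model.print_info();     // see also arch_name(), type_name(), desc()
        return true;
    }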