cui-llama.rn 1.4.6 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/android/src/main/CMakeLists.txt +9 -2
  2. package/android/src/main/jni.cpp +52 -34
  3. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  4. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  5. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  9. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  10. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  11. package/cpp/binary-ops.cpp +158 -0
  12. package/cpp/binary-ops.h +16 -0
  13. package/cpp/chat.cpp +1769 -1779
  14. package/cpp/chat.h +9 -1
  15. package/cpp/common.cpp +20 -522
  16. package/cpp/common.h +13 -36
  17. package/cpp/cpu-common.h +72 -0
  18. package/cpp/ggml-common.h +12 -6
  19. package/cpp/ggml-cpu-aarch64.cpp +1557 -80
  20. package/cpp/ggml-cpu-impl.h +2 -21
  21. package/cpp/ggml-cpu-quants.c +904 -405
  22. package/cpp/ggml-cpu.c +909 -13237
  23. package/cpp/ggml-impl.h +50 -23
  24. package/cpp/ggml-metal-impl.h +77 -3
  25. package/cpp/ggml-metal.m +794 -580
  26. package/cpp/ggml.c +92 -3
  27. package/cpp/ggml.h +29 -5
  28. package/cpp/gguf.cpp +1 -0
  29. package/cpp/llama-adapter.cpp +55 -20
  30. package/cpp/llama-adapter.h +11 -9
  31. package/cpp/llama-arch.cpp +217 -16
  32. package/cpp/llama-arch.h +25 -0
  33. package/cpp/llama-batch.h +2 -2
  34. package/cpp/llama-chat.cpp +54 -2
  35. package/cpp/llama-chat.h +3 -0
  36. package/cpp/llama-context.cpp +2294 -1238
  37. package/cpp/llama-context.h +214 -77
  38. package/cpp/llama-cparams.h +1 -0
  39. package/cpp/llama-graph.cpp +1695 -0
  40. package/cpp/llama-graph.h +592 -0
  41. package/cpp/llama-hparams.cpp +8 -0
  42. package/cpp/llama-hparams.h +17 -0
  43. package/cpp/llama-io.cpp +15 -0
  44. package/cpp/llama-io.h +35 -0
  45. package/cpp/llama-kv-cache.cpp +965 -303
  46. package/cpp/llama-kv-cache.h +145 -151
  47. package/cpp/llama-memory.cpp +1 -0
  48. package/cpp/llama-memory.h +21 -0
  49. package/cpp/llama-mmap.cpp +1 -1
  50. package/cpp/llama-model-loader.cpp +10 -5
  51. package/cpp/llama-model-loader.h +5 -3
  52. package/cpp/llama-model.cpp +9194 -201
  53. package/cpp/llama-model.h +40 -1
  54. package/cpp/llama-sampling.cpp +5 -0
  55. package/cpp/llama-vocab.cpp +36 -5
  56. package/cpp/llama.cpp +51 -9984
  57. package/cpp/llama.h +102 -22
  58. package/cpp/log.cpp +34 -0
  59. package/cpp/minja/chat-template.hpp +15 -7
  60. package/cpp/minja/minja.hpp +120 -94
  61. package/cpp/ops.cpp +8723 -0
  62. package/cpp/ops.h +128 -0
  63. package/cpp/rn-llama.cpp +44 -53
  64. package/cpp/rn-llama.h +2 -12
  65. package/cpp/sampling.cpp +3 -0
  66. package/cpp/sgemm.cpp +533 -88
  67. package/cpp/simd-mappings.h +888 -0
  68. package/cpp/speculative.cpp +4 -4
  69. package/cpp/unary-ops.cpp +186 -0
  70. package/cpp/unary-ops.h +28 -0
  71. package/cpp/vec.cpp +258 -0
  72. package/cpp/vec.h +802 -0
  73. package/ios/CMakeLists.txt +5 -2
  74. package/ios/RNLlama.mm +2 -2
  75. package/ios/RNLlamaContext.mm +40 -24
  76. package/package.json +1 -1
  77. package/src/NativeRNLlama.ts +6 -4
  78. package/src/index.ts +3 -1
  79. package/cpp/chat-template.hpp +0 -529
  80. package/cpp/minja.hpp +0 -2915
package/cpp/llama-graph.h ADDED
@@ -0,0 +1,592 @@
+ #pragma once
+
+ #include "llama-arch.h"
+ #include "llama-hparams.h"
+ #include "llama-adapter.h"
+
+ #include <cstdint>
+ #include <vector>
+ #include <memory>
+ #include <set>
+ #include <functional>
+
+ struct lm_ggml_cgraph;
+ struct lm_ggml_context;
+ struct lm_ggml_tensor;
+
+ struct llama_ubatch;
+ struct llama_cparams;
+
+ class llama_memory_i;
+ class llama_kv_cache_unified;
+
+ // certain models (typically multi-modal) can produce different types of graphs
+ enum llm_graph_type {
+     LLM_GRAPH_TYPE_DEFAULT,
+     LLM_GRAPH_TYPE_ENCODER,
+     LLM_GRAPH_TYPE_DECODER,
+ };
+
+ enum llm_ffn_op_type {
+     LLM_FFN_SILU,
+     LLM_FFN_GELU,
+     LLM_FFN_RELU,
+     LLM_FFN_RELU_SQR,
+     LLM_FFN_SWIGLU,
+ };
+
+ enum llm_ffn_gate_type {
+     LLM_FFN_SEQ,
+     LLM_FFN_PAR, // ffn_gate is parallel to ffn_up
+ };
+
+ enum llm_norm_type {
+     LLM_NORM,
+     LLM_NORM_RMS,
+     LLM_NORM_GROUP,
+ };
+
+ // TODO: tmp - need something better to pass the data from the encoder to the decoder
+ struct llama_cross {
+     // the output embeddings from the encoder as a ggml tensor
+     // TODO: this needs more work to be correct, for now copy the embeddings data to host memory
+     // ref: https://github.com/ggml-org/llama.cpp/pull/11213#discussion_r1969892524
+     //lm_ggml_tensor * t_embd = nullptr;
+
+     int64_t n_embd = 0;
+     int64_t n_enc  = 0;
+
+     // embeddings data copied to host memory (tmp)
+     std::vector<float> v_embd;
+
+     // needed to construct the cross-attention mask in the decoder
+     std::vector<std::set<llama_seq_id>> seq_ids_enc;
+ };
+
+ //
+ // llm_graph_input
+ //
+
+ class llm_graph_input_i {
+ public:
+     virtual ~llm_graph_input_i() = default;
+
+     virtual void set_input(const llama_ubatch * ubatch) = 0;
+ };
+
+ using llm_graph_input_ptr = std::unique_ptr<llm_graph_input_i>;
+
+
+ class llm_graph_input_embd : public llm_graph_input_i {
+ public:
+     llm_graph_input_embd()          = default;
+     virtual ~llm_graph_input_embd() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * tokens = nullptr; // I32 [n_batch]
+     lm_ggml_tensor * embd   = nullptr; // F32 [n_embd, n_batch]
+ };
+
+ class llm_graph_input_pos : public llm_graph_input_i {
+ public:
+     llm_graph_input_pos(int64_t n_pos_per_token) : n_pos_per_token(n_pos_per_token) {}
+     virtual ~llm_graph_input_pos() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * pos = nullptr; // I32 [n_batch]
+
+     const int64_t n_pos_per_token = 1;
+ };
+
+ // temperature tuning, used by llama4
+ class llm_graph_input_attn_temp : public llm_graph_input_i {
+ public:
+     llm_graph_input_attn_temp(int64_t n_pos_per_token, uint32_t n_attn_temp_floor_scale, float f_attn_temp_scale)
+         : n_pos_per_token(n_pos_per_token), n_attn_temp_floor_scale(n_attn_temp_floor_scale), f_attn_temp_scale(f_attn_temp_scale) {}
+     virtual ~llm_graph_input_attn_temp() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * attn_scale = nullptr; // F32 [n_batch]
+
+     const int64_t n_pos_per_token = 1;
+
+     const uint32_t n_attn_temp_floor_scale;
+     const float    f_attn_temp_scale;
+ };
+
+ class llm_graph_input_pos_bucket : public llm_graph_input_i {
+ public:
+     llm_graph_input_pos_bucket(const llama_hparams & hparams) : hparams(hparams) {}
+     virtual ~llm_graph_input_pos_bucket() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * pos_bucket = nullptr; // I32 [n_batch, n_batch]
+
+     const llama_hparams & hparams;
+ };
+
+ class llm_graph_input_pos_bucket_kv : public llm_graph_input_i {
+ public:
+     llm_graph_input_pos_bucket_kv(
+             const llama_hparams & hparams,
+             const llama_kv_cache_unified * kv_self) : hparams(hparams), kv_self(kv_self) {}
+     virtual ~llm_graph_input_pos_bucket_kv() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * pos_bucket = nullptr; // I32 [n_kv, n_batch]
+
+     const llama_hparams & hparams;
+     const llama_kv_cache_unified * kv_self;
+ };
+
+ class llm_graph_input_out_ids : public llm_graph_input_i {
+ public:
+     llm_graph_input_out_ids(
+             const llama_hparams & hparams,
+             const llama_cparams & cparams,
+             int32_t n_outputs) : hparams(hparams), cparams(cparams), n_outputs(n_outputs) {}
+     virtual ~llm_graph_input_out_ids() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * out_ids; // I32 [n_outputs]
+
+     const llama_hparams & hparams;
+     const llama_cparams & cparams;
+
+     const int32_t n_outputs;
+ };
+
+ class llm_graph_input_mean : public llm_graph_input_i {
+ public:
+     llm_graph_input_mean(const llama_cparams & cparams) : cparams(cparams) {}
+     virtual ~llm_graph_input_mean() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * mean; // F32 [n_batch, n_batch]
+
+     const llama_cparams & cparams;
+ };
+
+ class llm_graph_input_cls : public llm_graph_input_i {
+ public:
+     llm_graph_input_cls(const llama_cparams & cparams) : cparams(cparams) {}
+     virtual ~llm_graph_input_cls() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * cls; // I32 [n_batch]
+
+     const llama_cparams & cparams;
+ };
+
+ class llm_graph_input_s_copy : public llm_graph_input_i {
+ public:
+     llm_graph_input_s_copy(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {}
+     virtual ~llm_graph_input_s_copy() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * s_copy; // I32 [kv_size]
+
+     const llama_kv_cache_unified * kv_self;
+ };
+
+ class llm_graph_input_s_mask : public llm_graph_input_i {
+ public:
+     llm_graph_input_s_mask(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {}
+     virtual ~llm_graph_input_s_mask() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * s_mask; // F32 [1, n_kv]
+
+     const llama_kv_cache_unified * kv_self;
+ };
+
+ class llm_graph_input_cross_embd : public llm_graph_input_i {
+ public:
+     llm_graph_input_cross_embd(
+             const llama_cross * cross) : cross(cross) {}
+     virtual ~llm_graph_input_cross_embd() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * cross_embd; // F32 [n_embd, n_outputs_enc]
+
+     const llama_cross * cross;
+ };
+
+ class llm_graph_input_attn_no_cache : public llm_graph_input_i {
+ public:
+     llm_graph_input_attn_no_cache(const llama_hparams & hparams, const llama_cparams & cparams) :
+         hparams(hparams),
+         cparams(cparams) {
+     }
+     ~llm_graph_input_attn_no_cache() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * get_kq_mask() const { return kq_mask_cnv; }
+
+     lm_ggml_tensor * kq_mask     = nullptr; // F32 [n_tokens, n_batch]
+     lm_ggml_tensor * kq_mask_cnv = nullptr; //     [n_tokens, n_batch]
+
+     const llama_hparams & hparams;
+     const llama_cparams & cparams;
+ };
+
+ class llm_graph_input_attn_kv_unified : public llm_graph_input_i {
+ public:
+     llm_graph_input_attn_kv_unified(
+             const llama_hparams & hparams,
+             const llama_cparams & cparams,
+             const llama_kv_cache_unified * kv_self) :
+         hparams(hparams),
+         cparams(cparams),
+         kv_self(kv_self) {
+     }
+     ~llm_graph_input_attn_kv_unified() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * get_kq_mask()     const { return self_kq_mask_cnv; }
+     lm_ggml_tensor * get_kq_mask_swa() const { return self_kq_mask_swa_cnv; }
+
+     lm_ggml_tensor * self_kq_mask         = nullptr; // F32 [n_kv, n_batch]
+     lm_ggml_tensor * self_kq_mask_cnv     = nullptr; //     [n_kv, n_batch]
+     lm_ggml_tensor * self_kq_mask_swa     = nullptr; // F32 [n_kv, n_batch]
+     lm_ggml_tensor * self_kq_mask_swa_cnv = nullptr; //     [n_kv, n_batch]
+
+     const llama_hparams & hparams;
+     const llama_cparams & cparams;
+
+     const llama_kv_cache_unified * kv_self;
+ };
+
+ class llm_graph_input_attn_cross : public llm_graph_input_i {
+ public:
+     llm_graph_input_attn_cross(const llama_cross * cross) : cross(cross) {}
+     ~llm_graph_input_attn_cross() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * get_kq_mask_cross() const { return cross_kq_mask_cnv; }
+
+     lm_ggml_tensor * cross_kq_mask     = nullptr; // F32 [n_outputs_enc, n_batch]
+     lm_ggml_tensor * cross_kq_mask_cnv = nullptr; // F32 [n_outputs_enc, n_batch]
+
+     const llama_cross * cross = nullptr;
+ };
+
+ //
+ // llm_graph_result
+ //
+
+ // these objects deliver the result from the graph build process back to the llama_context
+ // note that the input tensors created for the graph are referenced here - the goal is to be able to populate their
+ // specific data, by calling the set_inputs() method
+ // along with the input tensors, the object also provides commonly used outputs tensors, such as logits, embeddings, etc.
+ // these are used by the llama_context to extact the relevant data, based on the compute parameters
+
+ class llm_graph_result_i {
+ public:
+     virtual ~llm_graph_result_i() = default;
+
+     virtual lm_ggml_tensor * get_logits() = 0;
+     virtual lm_ggml_tensor * get_embd() = 0;
+     virtual lm_ggml_tensor * get_embd_pooled() = 0;
+
+     virtual void set_inputs(const llama_ubatch * ubatch) = 0;
+ };
+
+ using llm_graph_result_ptr = std::unique_ptr<llm_graph_result_i>;
+
+
+ class llm_graph_result : public llm_graph_result_i {
+ public:
+     virtual ~llm_graph_result() = default;
+
+     lm_ggml_tensor * get_logits()      override { return t_logits; }
+     lm_ggml_tensor * get_embd()        override { return t_embd; }
+     lm_ggml_tensor * get_embd_pooled() override { return t_embd_pooled; }
+
+     void set_inputs(const llama_ubatch * ubatch) override {
+         for (auto & input : inputs) {
+             input->set_input(ubatch);
+         }
+     }
+
+     llm_graph_input_i * add_input(llm_graph_input_ptr input) {
+         inputs.emplace_back(std::move(input));
+         return inputs.back().get();
+     }
+
+     // important graph nodes
+     lm_ggml_tensor * t_logits      = nullptr;
+     lm_ggml_tensor * t_embd        = nullptr;
+     lm_ggml_tensor * t_embd_pooled = nullptr;
+
+     std::vector<llm_graph_input_ptr> inputs;
+ };
+
+ //
+ // llm_graph_context
+ //
+
+ // callback that allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.)
+ using llm_graph_cb = std::function<void(const llama_ubatch & ubatch, lm_ggml_tensor * cur, const char * name, int il)>;
+
+ struct llm_graph_params {
+     lm_ggml_context * ctx;
+
+     const llm_arch arch;
+
+     const llama_hparams & hparams;
+     const llama_cparams & cparams;
+     const llama_ubatch  & ubatch;
+
+     lm_ggml_backend_sched * sched;
+     lm_ggml_backend * backend_cpu;
+
+     const llama_adapter_cvec  * cvec;
+     const llama_adapter_loras * loras;
+     const llama_memory_i      * memory;
+     const llama_cross         * cross;
+
+     int32_t n_outputs;
+
+     const llm_graph_cb & cb;
+ };
+
+ struct llm_graph_context {
+     const llm_arch arch;
+
+     const llama_hparams & hparams;
+     const llama_cparams & cparams;
+     const llama_ubatch  & ubatch;
+
+     const int64_t n_embd;
+     const int64_t n_layer;
+     const int64_t n_rot;
+     const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train)
+     const int64_t n_ctx_per_seq;
+     const int64_t n_head;
+     const int64_t n_head_kv;
+     const int64_t n_embd_head_k;
+     const int64_t n_embd_k_gqa;
+     const int64_t n_embd_head_v;
+     const int64_t n_embd_v_gqa;
+     const int64_t n_expert;
+     const int64_t n_expert_used;
+
+     const float freq_base;
+     const float freq_scale;
+     const float ext_factor;
+     const float attn_factor;
+     const float beta_fast;
+     const float beta_slow;
+     const float norm_eps;
+     const float norm_rms_eps;
+
+     const int32_t n_tokens;
+     const int32_t n_outputs;
+     const int32_t n_ctx_orig; // yarn
+
+     const enum llama_pooling_type pooling_type;
+     const enum llama_rope_type    rope_type;
+
+     lm_ggml_context * ctx0 = nullptr;
+
+     lm_ggml_backend_sched * sched;
+
+     lm_ggml_backend * backend_cpu; // TODO: needed by build_attn_mha, figure out a way to remove?
+
+     const llama_adapter_cvec  * cvec;
+     const llama_adapter_loras * loras;
+     const llama_memory_i      * memory;
+     const llama_cross         * cross;
+
+     const llm_graph_cb & cb_func;
+
+     std::unique_ptr<llm_graph_result> res;
+
+     llm_graph_context(const llm_graph_params & params);
+
+     int64_t n_pos_per_token() const;
+
+     void cb(lm_ggml_tensor * cur, const char * name, int il) const;
+
+     //
+     // common
+     //
+
+     lm_ggml_tensor * build_cvec(
+             lm_ggml_tensor * cur,
+             int il) const;
+
+     // do mat_mul, while optionally apply lora
+     lm_ggml_tensor * build_lora_mm(
+             lm_ggml_tensor * w,
+             lm_ggml_tensor * cur) const;
+
+     // do mat_mul_id, while optionally apply lora
+     lm_ggml_tensor * build_lora_mm_id(
+             lm_ggml_tensor * w,   // lm_ggml_tensor * as
+             lm_ggml_tensor * cur, // lm_ggml_tensor * b
+             lm_ggml_tensor * ids) const;
+
+     lm_ggml_tensor * build_norm(
+             lm_ggml_tensor * cur,
+             lm_ggml_tensor * mw,
+             lm_ggml_tensor * mb,
+             llm_norm_type type,
+             int il) const;
+
+     lm_ggml_tensor * build_ffn(
+             lm_ggml_tensor * cur,
+             lm_ggml_tensor * up,
+             lm_ggml_tensor * up_b,
+             lm_ggml_tensor * up_s,
+             lm_ggml_tensor * gate,
+             lm_ggml_tensor * gate_b,
+             lm_ggml_tensor * gate_s,
+             lm_ggml_tensor * down,
+             lm_ggml_tensor * down_b,
+             lm_ggml_tensor * down_s,
+             lm_ggml_tensor * act_scales,
+             llm_ffn_op_type type_op,
+             llm_ffn_gate_type type_gate,
+             int il) const;
+
+     lm_ggml_tensor * build_moe_ffn(
+             lm_ggml_tensor * cur,
+             lm_ggml_tensor * gate_inp,
+             lm_ggml_tensor * up_exps,
+             lm_ggml_tensor * gate_exps,
+             lm_ggml_tensor * down_exps,
+             lm_ggml_tensor * exp_probs_b,
+             int64_t n_expert,
+             int64_t n_expert_used,
+             llm_ffn_op_type type_op,
+             bool norm_w,
+             bool scale_w,
+             float w_scale,
+             llama_expert_gating_func_type gating_op,
+             int il) const;
+
+     //
+     // inputs
+     //
+
+     lm_ggml_tensor * build_inp_embd(lm_ggml_tensor * tok_embd) const;
+     lm_ggml_tensor * build_inp_pos() const;
+     lm_ggml_tensor * build_inp_attn_scale() const;
+     lm_ggml_tensor * build_inp_out_ids() const;
+     lm_ggml_tensor * build_inp_mean() const;
+     lm_ggml_tensor * build_inp_cls() const;
+     lm_ggml_tensor * build_inp_s_copy() const;
+     lm_ggml_tensor * build_inp_s_mask() const;
+
+     lm_ggml_tensor * build_inp_cross_embd() const;
+     lm_ggml_tensor * build_inp_pos_bucket_enc() const;
+     lm_ggml_tensor * build_inp_pos_bucket_dec() const;
+     lm_ggml_tensor * build_pos_bias(lm_ggml_tensor * pos_bucket, lm_ggml_tensor * attn_rel_b) const;
+
+     //
+     // attention
+     //
+
+     lm_ggml_tensor * build_attn_mha(
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * q,       // [n_embd_head_q, n_tokens, n_head_q]
+             lm_ggml_tensor * k,       // [n_embd_head_k, n_tokens, n_head_k]
+             lm_ggml_tensor * v,       // [n_embd_head_v, n_tokens, n_head_v] (v_trans == false)
+             lm_ggml_tensor * kq_b,
+             lm_ggml_tensor * kq_mask,
+             bool v_trans,
+             float kq_scale) const;
+
+     llm_graph_input_attn_no_cache * build_attn_inp_no_cache() const;
+
+     lm_ggml_tensor * build_attn(
+             llm_graph_input_attn_no_cache * inp,
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * wo,
+             lm_ggml_tensor * wo_b,
+             lm_ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
+             lm_ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
+             lm_ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
+             lm_ggml_tensor * kq_b,
+             float kq_scale,
+             int il) const;
+
+     llm_graph_input_attn_kv_unified * build_attn_inp_kv_unified() const;
+
+     lm_ggml_tensor * build_attn(
+             llm_graph_input_attn_kv_unified * inp,
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * wo,
+             lm_ggml_tensor * wo_b,
+             lm_ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
+             lm_ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
+             lm_ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
+             lm_ggml_tensor * kq_b,
+             float kq_scale,
+             int il) const;
+
+     llm_graph_input_attn_cross * build_attn_inp_cross() const;
+
+     lm_ggml_tensor * build_attn(
+             llm_graph_input_attn_cross * inp,
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * wo,
+             lm_ggml_tensor * wo_b,
+             lm_ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
+             lm_ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
+             lm_ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
+             lm_ggml_tensor * kq_b,
+             float kq_scale,
+             int il) const;
+
+     //
+     // recurrent
+     //
+
+     lm_ggml_tensor * build_copy_mask_state(
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * s,
+             lm_ggml_tensor * state_copy,
+             lm_ggml_tensor * state_mask,
+             int32_t n_state,
+             int32_t n_seqs) const;
+
+     lm_ggml_tensor * build_rwkv_token_shift_load(
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * state_copy,
+             lm_ggml_tensor * state_mask,
+             const llama_ubatch & ubatch,
+             int il) const;
+
+     lm_ggml_tensor * build_rwkv_token_shift_store(
+             lm_ggml_tensor * token_shift,
+             const llama_ubatch & ubatch,
+             int il) const;
+
+     //
+     // pooling
+     //
+
+     void build_pooling(
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * cls,
+             lm_ggml_tensor * cls_b,
+             lm_ggml_tensor * cls_out,
+             lm_ggml_tensor * cls_out_b) const;
+ };
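
The header above centralizes graph construction around an input-registration pattern: while a model graph is being built, each required input is registered as an llm_graph_input_i object on an llm_graph_result via add_input(), and later the context populates all of them in one pass through set_inputs(). The following standalone C++ sketch is illustrative only; it uses toy stand-in types (toy_ubatch, toy_input_pos, toy_result) rather than the real ggml/llama structs, purely to show the shape of that pattern.

// Illustrative sketch of the register-then-populate pattern (toy types, not package code).
#include <cstdio>
#include <memory>
#include <utility>
#include <vector>

struct toy_ubatch { int n_tokens; };              // stand-in for llama_ubatch

class toy_input_i {                               // mirrors llm_graph_input_i
public:
    virtual ~toy_input_i() = default;
    virtual void set_input(const toy_ubatch * ubatch) = 0;
};

class toy_input_pos : public toy_input_i {        // mirrors llm_graph_input_pos
public:
    void set_input(const toy_ubatch * ubatch) override {
        // the real class would fill its pre-allocated ggml tensor here
        std::printf("setting positions for %d tokens\n", ubatch->n_tokens);
    }
};

class toy_result {                                // mirrors llm_graph_result
public:
    toy_input_i * add_input(std::unique_ptr<toy_input_i> input) {
        inputs.emplace_back(std::move(input));
        return inputs.back().get();               // graph builder keeps the raw pointer
    }
    void set_inputs(const toy_ubatch * ubatch) {  // called later, before graph compute
        for (auto & input : inputs) {
            input->set_input(ubatch);
        }
    }
private:
    std::vector<std::unique_ptr<toy_input_i>> inputs;
};

int main() {
    toy_result res;
    res.add_input(std::make_unique<toy_input_pos>()); // build time: register the input
    toy_ubatch ub{8};
    res.set_inputs(&ub);                              // decode time: populate every input
}

The real classes follow the same shape, except set_input() writes batch data into ggml tensors instead of printing.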
package/cpp/llama-hparams.cpp CHANGED
@@ -69,3 +69,11 @@ uint32_t llama_hparams::n_embd_v_s() const {
      // corresponds to Mamba's ssm_states size
      return ssm_d_state * ssm_d_inner;
  }
+
+ bool llama_hparams::is_swa(uint32_t il) const {
+     if (il < n_layer) {
+         return n_swa > 0 && n_swa_pattern > 0 && il % n_swa_pattern < (n_swa_pattern - 1);
+     }
+
+     LM_GGML_ABORT("fatal error");
+ }
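
The new llama_hparams::is_swa(il) helper above encodes a repeating per-layer layout: when n_swa and n_swa_pattern are non-zero, every layer whose index satisfies il % n_swa_pattern < n_swa_pattern - 1 uses sliding-window attention, and the last layer of each group uses full attention. A small illustrative sketch of that modulo test, assuming a hypothetical n_swa_pattern of 4 (so layers 0, 1, 2 are SWA and every 4th layer is full attention):

// Illustrative only: prints the layer pattern implied by the is_swa() test above.
#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t n_swa_pattern = 4; // assumed value for illustration
    for (uint32_t il = 0; il < 8; ++il) {
        const bool swa = il % n_swa_pattern < (n_swa_pattern - 1);
        std::printf("layer %u: %s\n", il, swa ? "SWA" : "full attention");
    }
}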
package/cpp/llama-hparams.h CHANGED
@@ -36,6 +36,7 @@ struct llama_hparams {
      uint32_t n_layer;
      uint32_t n_rot;
      uint32_t n_swa = 0; // sliding window attention (SWA)
+     uint32_t n_swa_pattern = 1; // by default, all layers use non-sliding-window attention
      uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
      uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
      uint32_t n_expert = 0;
@@ -75,10 +76,16 @@ struct llama_hparams {
      uint32_t time_decay_extra_dim = 0;
      uint32_t wkv_head_size = 0;
      uint32_t token_shift_count = 2;
+     uint32_t n_lora_decay = 0;
+     uint32_t n_lora_iclr = 0;
+     uint32_t n_lora_value_res_mix = 0;
+     uint32_t n_lora_gate = 0;

      float rope_attn_factor = 1.0f;
      float rope_freq_base_train;
+     float rope_freq_base_train_swa;
      float rope_freq_scale_train;
+     float rope_freq_scale_train_swa;
      uint32_t n_ctx_orig_yarn;
      float rope_yarn_log_mul;

@@ -105,6 +112,14 @@ struct llama_hparams {
      bool use_alibi = false;
      bool attn_soft_cap = false;

+     uint32_t n_moe_layer_step = 0;
+     bool use_kq_norm = true;
+     uint32_t n_attn_chunk = 0;
+     // values below seems to be fixed on llama4
+     uint32_t n_no_rope_layer_step = 4;
+     uint32_t n_attn_temp_floor_scale = 8192;
+     float f_attn_temp_scale = 0.1;
+
      // needed by encoder-decoder models (e.g. T5, FLAN-T5)
      // ref: https://github.com/ggerganov/llama.cpp/pull/8141
      llama_token dec_start_token_id = LLAMA_TOKEN_NULL;
@@ -133,6 +148,8 @@ struct llama_hparams {

      // dimension of the recurrent state embeddings
      uint32_t n_embd_v_s() const;
+
+     bool is_swa(uint32_t il) const;
  };

  static_assert(std::is_trivially_copyable<llama_hparams>::value, "llama_hparams must be trivially copyable");
package/cpp/llama-io.cpp ADDED
@@ -0,0 +1,15 @@
+ #include "llama-io.h"
+
+ void llama_io_write_i::write_string(const std::string & str) {
+     uint32_t str_size = str.size();
+
+     write(&str_size, sizeof(str_size));
+     write(str.data(), str_size);
+ }
+
+ void llama_io_read_i::read_string(std::string & str) {
+     uint32_t str_size;
+     read_to(&str_size, sizeof(str_size));
+
+     str.assign((const char *) read(str_size), str_size);
+ }
package/cpp/llama-io.h ADDED
@@ -0,0 +1,35 @@
+ #pragma once
+
+ #include <cstddef>
+ #include <cstdint>
+ #include <string>
+
+ struct lm_ggml_tensor;
+
+ class llama_io_write_i {
+ public:
+     llama_io_write_i() = default;
+     virtual ~llama_io_write_i() = default;
+
+     virtual void write(const void * src, size_t size) = 0;
+     virtual void write_tensor(const lm_ggml_tensor * tensor, size_t offset, size_t size) = 0;
+
+     // bytes written so far
+     virtual size_t n_bytes() = 0;
+
+     void write_string(const std::string & str);
+ };
+
+ class llama_io_read_i {
+ public:
+     llama_io_read_i() = default;
+     virtual ~llama_io_read_i() = default;
+
+     virtual const uint8_t * read(size_t size) = 0;
+     virtual void read_to(void * dst, size_t size) = 0;
+
+     // bytes read so far
+     virtual size_t n_bytes() = 0;
+
+     void read_string(std::string & str);
+ };
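
write_string() and read_string() in llama-io.cpp are implemented purely in terms of the pure-virtual write()/read()/read_to() methods declared above (a length prefix followed by the raw bytes), so any byte sink or source can back these interfaces. The sketch below is illustrative only: the buffer-backed writer, its class name, and its members are hypothetical and not taken from the package; it just shows how a concrete implementation plugs into the interface.

// Hypothetical buffer-backed implementation of the write side, for illustration.
#include <cstdint>
#include <string>
#include <vector>

struct lm_ggml_tensor; // opaque, as in llama-io.h

class llama_io_write_i {
public:
    virtual ~llama_io_write_i() = default;
    virtual void write(const void * src, size_t size) = 0;
    virtual void write_tensor(const lm_ggml_tensor * tensor, size_t offset, size_t size) = 0;
    virtual size_t n_bytes() = 0;

    void write_string(const std::string & str) {
        uint32_t str_size = str.size();
        write(&str_size, sizeof(str_size)); // length prefix ...
        write(str.data(), str_size);        // ... then the raw characters
    }
};

// illustrative in-memory writer (name and layout are assumptions, not package code)
class example_write_buffer : public llama_io_write_i {
public:
    void write(const void * src, size_t size) override {
        const auto * p = static_cast<const uint8_t *>(src);
        buf.insert(buf.end(), p, p + size);
    }
    void write_tensor(const lm_ggml_tensor * /*tensor*/, size_t /*offset*/, size_t size) override {
        buf.insert(buf.end(), size, uint8_t(0)); // tensor payload omitted in this sketch
    }
    size_t n_bytes() override { return buf.size(); }

    std::vector<uint8_t> buf;
};

int main() {
    example_write_buffer out;
    out.write_string("hello");
    return out.n_bytes() == sizeof(uint32_t) + 5 ? 0 : 1; // 4-byte length prefix + 5 chars
}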