cui-llama.rn 1.6.1 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/android/src/main/CMakeLists.txt +6 -0
- package/android/src/main/java/com/rnllama/LlamaContext.java +38 -5
- package/android/src/main/java/com/rnllama/RNLlama.java +139 -4
- package/android/src/main/jni.cpp +153 -14
- package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
- package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
- package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +24 -4
- package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +22 -2
- package/cpp/chat.cpp +128 -106
- package/cpp/chat.h +2 -0
- package/cpp/common.cpp +41 -76
- package/cpp/common.h +23 -19
- package/cpp/ggml-backend.cpp +9 -5
- package/cpp/ggml-backend.h +4 -4
- package/cpp/ggml-cpu/ggml-cpu-aarch64.cpp +0 -2
- package/cpp/ggml-cpu/ggml-cpu-quants.c +306 -6
- package/cpp/ggml-cpu/ggml-cpu.c +5 -13
- package/cpp/ggml-cpu/ggml-cpu.cpp +29 -16
- package/cpp/ggml-cpu/ops.cpp +107 -13
- package/cpp/ggml-cpu/vec.cpp +0 -6
- package/cpp/ggml-cpu/vec.h +16 -0
- package/cpp/ggml-llama-sim.metallib +0 -0
- package/cpp/ggml-llama.metallib +0 -0
- package/cpp/ggml-metal-impl.h +36 -11
- package/cpp/ggml-metal.m +321 -132
- package/cpp/ggml-opt.cpp +373 -190
- package/cpp/ggml-opt.h +49 -28
- package/cpp/ggml-quants.c +0 -6
- package/cpp/ggml.c +93 -38
- package/cpp/ggml.h +21 -7
- package/cpp/gguf.cpp +33 -33
- package/cpp/llama-adapter.cpp +6 -0
- package/cpp/llama-arch.cpp +3 -0
- package/cpp/llama-batch.cpp +3 -1
- package/cpp/llama-chat.cpp +8 -6
- package/cpp/llama-chat.h +1 -0
- package/cpp/llama-context.cpp +349 -135
- package/cpp/llama-context.h +30 -3
- package/cpp/llama-cparams.h +1 -0
- package/cpp/llama-graph.cpp +150 -234
- package/cpp/llama-graph.h +52 -7
- package/cpp/llama-hparams.cpp +17 -1
- package/cpp/llama-hparams.h +34 -5
- package/cpp/llama-kv-cache.cpp +662 -321
- package/cpp/llama-kv-cache.h +203 -93
- package/cpp/llama-memory.h +3 -2
- package/cpp/llama-model-loader.cpp +24 -15
- package/cpp/llama-model-saver.cpp +281 -0
- package/cpp/llama-model-saver.h +37 -0
- package/cpp/llama-model.cpp +536 -132
- package/cpp/llama-model.h +7 -1
- package/cpp/llama-sampling.cpp +18 -6
- package/cpp/llama-vocab.cpp +46 -8
- package/cpp/llama-vocab.h +6 -0
- package/cpp/llama.cpp +14 -0
- package/cpp/llama.h +72 -131
- package/cpp/minja/chat-template.hpp +9 -5
- package/cpp/minja/minja.hpp +69 -36
- package/cpp/rn-llama.cpp +611 -47
- package/cpp/rn-llama.h +33 -3
- package/cpp/sampling.cpp +57 -50
- package/cpp/tools/mtmd/clip-impl.h +462 -0
- package/cpp/tools/mtmd/clip.cpp +4024 -0
- package/cpp/tools/mtmd/clip.h +101 -0
- package/cpp/tools/mtmd/miniaudio.h +93468 -0
- package/cpp/tools/mtmd/mtmd-audio.cpp +855 -0
- package/cpp/tools/mtmd/mtmd-audio.h +62 -0
- package/cpp/tools/mtmd/mtmd-helper.cpp +297 -0
- package/cpp/tools/mtmd/mtmd.cpp +942 -0
- package/cpp/tools/mtmd/mtmd.h +362 -0
- package/cpp/tools/mtmd/stb_image.h +7988 -0
- package/ios/CMakeLists.txt +7 -0
- package/ios/RNLlama.mm +77 -3
- package/ios/RNLlamaContext.h +5 -1
- package/ios/RNLlamaContext.mm +105 -10
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +23 -19
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +21 -7
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +30 -3
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +52 -7
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +34 -5
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +203 -93
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +3 -2
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +7 -1
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +72 -131
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +33 -3
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +23 -19
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +21 -7
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +30 -3
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +52 -7
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +34 -5
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +203 -93
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +3 -2
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +7 -1
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +72 -131
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +33 -3
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +23 -19
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +21 -7
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +30 -3
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +52 -7
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +34 -5
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +203 -93
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +3 -2
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +7 -1
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +72 -131
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +33 -3
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +23 -19
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +21 -7
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +30 -3
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +52 -7
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +34 -5
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +203 -93
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +3 -2
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +7 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +72 -131
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +33 -3
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
- package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
- package/jest/mock.js +33 -7
- package/lib/commonjs/NativeRNLlama.js.map +1 -1
- package/lib/commonjs/index.js +153 -21
- package/lib/commonjs/index.js.map +1 -1
- package/lib/module/NativeRNLlama.js.map +1 -1
- package/lib/module/index.js +152 -20
- package/lib/module/index.js.map +1 -1
- package/lib/typescript/NativeRNLlama.d.ts +50 -4
- package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +72 -6
- package/lib/typescript/index.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/NativeRNLlama.ts +67 -4
- package/src/index.ts +212 -38
- package/lib/commonjs/chat.js +0 -37
- package/lib/commonjs/chat.js.map +0 -1
- package/lib/module/chat.js +0 -33
- package/lib/module/chat.js.map +0 -1
- package/lib/typescript/chat.d.ts +0 -10
- package/lib/typescript/chat.d.ts.map +0 -1
- package/src/chat.ts +0 -44
llama-graph.h

@@ -19,6 +19,7 @@ struct llama_cparams;
 
 class llama_memory_i;
 class llama_kv_cache_unified;
+class llama_kv_cache_unified_iswa;
 class llama_kv_cache_recurrent;
 
 // certain models (typically multi-modal) can produce different types of graphs
@@ -255,6 +256,31 @@ public:
 
     void set_input(const llama_ubatch * ubatch) override;
 
+    lm_ggml_tensor * get_kq_mask() const { return self_kq_mask_cnv; }
+
+    lm_ggml_tensor * self_kq_mask     = nullptr; // F32 [n_kv, n_batch]
+    lm_ggml_tensor * self_kq_mask_cnv = nullptr; //     [n_kv, n_batch]
+
+    const llama_hparams & hparams;
+    const llama_cparams & cparams;
+
+    const llama_kv_cache_unified * kv_self;
+};
+
+class llm_graph_input_attn_kv_unified_iswa : public llm_graph_input_i {
+public:
+    llm_graph_input_attn_kv_unified_iswa(
+            const llama_hparams & hparams,
+            const llama_cparams & cparams,
+            const llama_kv_cache_unified_iswa * kv_self) :
+        hparams(hparams),
+        cparams(cparams),
+        kv_self(kv_self) {
+    }
+    ~llm_graph_input_attn_kv_unified_iswa() = default;
+
+    void set_input(const llama_ubatch * ubatch) override;
+
     lm_ggml_tensor * get_kq_mask() const { return self_kq_mask_cnv; }
     lm_ggml_tensor * get_kq_mask_swa() const { return self_kq_mask_swa_cnv; }
 
@@ -266,7 +292,7 @@ public:
     const llama_hparams & hparams;
     const llama_cparams & cparams;
 
-    const llama_kv_cache_unified * kv_self;
+    const llama_kv_cache_unified_iswa * kv_self;
 };
 
 class llm_graph_input_attn_cross : public llm_graph_input_i {
@@ -298,6 +324,7 @@ class llm_graph_result_i {
 public:
     virtual ~llm_graph_result_i() = default;
 
+    virtual lm_ggml_tensor * get_tokens() = 0;
     virtual lm_ggml_tensor * get_logits() = 0;
     virtual lm_ggml_tensor * get_embd() = 0;
     virtual lm_ggml_tensor * get_embd_pooled() = 0;
@@ -312,6 +339,7 @@ class llm_graph_result : public llm_graph_result_i {
 public:
     virtual ~llm_graph_result() = default;
 
+    lm_ggml_tensor * get_tokens() override { return t_tokens; }
     lm_ggml_tensor * get_logits() override { return t_logits; }
     lm_ggml_tensor * get_embd() override { return t_embd; }
     lm_ggml_tensor * get_embd_pooled() override { return t_embd_pooled; }
@@ -328,6 +356,7 @@ public:
     }
 
     // important graph nodes
+    lm_ggml_tensor * t_tokens = nullptr;
     lm_ggml_tensor * t_logits = nullptr;
     lm_ggml_tensor * t_embd = nullptr;
     lm_ggml_tensor * t_embd_pooled = nullptr;
@@ -375,7 +404,6 @@ struct llm_graph_context {
     const int64_t n_layer;
     const int64_t n_rot;
     const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train)
-    const int64_t n_ctx_per_seq;
     const int64_t n_head;
     const int64_t n_head_kv;
     const int64_t n_embd_head_k;
@@ -504,13 +532,12 @@ struct llm_graph_context {
 
     lm_ggml_tensor * build_attn_mha(
             lm_ggml_cgraph * gf,
-            lm_ggml_tensor * q,
-            lm_ggml_tensor * k,
-            lm_ggml_tensor * v,
+            lm_ggml_tensor * q, // [n_embd_head_q, n_head_q, n_tokens]
+            lm_ggml_tensor * k, // [n_embd_head_k, n_head_k, n_tokens]
+            lm_ggml_tensor * v, // [n_embd_head_v, n_head_v, n_tokens] (v_trans == false)
             lm_ggml_tensor * kq_b,
             lm_ggml_tensor * kq_mask,
-            lm_ggml_tensor * v_mla,
-            bool v_trans,
+            lm_ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v]
             float kq_scale) const;
 
     llm_graph_input_attn_no_cache * build_attn_inp_no_cache() const;
@@ -543,6 +570,21 @@ struct llm_graph_context {
             float kq_scale,
             int il) const;
 
+    llm_graph_input_attn_kv_unified_iswa * build_attn_inp_kv_unified_iswa() const;
+
+    lm_ggml_tensor * build_attn(
+            llm_graph_input_attn_kv_unified_iswa * inp,
+            lm_ggml_cgraph * gf,
+            lm_ggml_tensor * wo,
+            lm_ggml_tensor * wo_b,
+            lm_ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
+            lm_ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
+            lm_ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
+            lm_ggml_tensor * kq_b,
+            lm_ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v]
+            float kq_scale,
+            int il) const;
+
     llm_graph_input_attn_cross * build_attn_inp_cross() const;
 
     lm_ggml_tensor * build_attn(
@@ -593,3 +635,6 @@ struct llm_graph_context {
             lm_ggml_tensor * cls_out,
             lm_ggml_tensor * cls_out_b) const;
 };
+
+// TODO: better name
+int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional);
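llama_relative_position_bucket is only declared in this header. For orientation, a sketch of the T5-style relative position bucketing that a signature like this conventionally implements (an illustration with assumed constants and behaviour, not code taken from this package):

    // Illustrative sketch: map a signed distance x - y to a bucket index.
    // Small distances get their own bucket; larger ones share log-spaced buckets.
    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    int32_t relative_position_bucket_sketch(int64_t x, int64_t y, uint64_t n_buckets, bool bidirectional) {
        const int64_t max_distance = 128; // assumed cutoff

        if (bidirectional) {
            n_buckets /= 2; // half of the buckets for each direction
        }
        const int64_t max_exact = (int64_t) n_buckets / 2;

        int64_t rel    = x - y;
        int32_t bucket = 0;

        if (bidirectional) {
            bucket += (rel > 0) ? (int32_t) n_buckets : 0; // separate ranges per direction
            rel = std::abs(rel);
        } else {
            rel = -std::min<int64_t>(rel, 0); // causal: only positions behind the query count
        }

        int64_t val;
        if (rel < max_exact) {
            val = rel; // one bucket per exact distance
        } else {
            // logarithmically spaced buckets up to max_distance, clamped to the last bucket
            val = max_exact + (int64_t) (std::log((double) rel / (double) max_exact)
                                       / std::log((double) max_distance / (double) max_exact)
                                       * (double) ((int64_t) n_buckets - max_exact));
            val = std::min<int64_t>(val, (int64_t) n_buckets - 1);
        }
        return bucket + (int32_t) val;
    }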
llama-hparams.h

@@ -14,6 +14,12 @@ enum llama_expert_gating_func_type {
     LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID = 2,
 };
 
+enum llama_swa_type {
+    LLAMA_SWA_TYPE_NONE     = 0,
+    LLAMA_SWA_TYPE_STANDARD = 1,
+    LLAMA_SWA_TYPE_CHUNKED  = 2,
+};
+
 struct llama_hparams_posnet {
     uint32_t n_embd;
     uint32_t n_layer;
@@ -35,8 +41,6 @@ struct llama_hparams {
     uint32_t n_embd_features = 0;
     uint32_t n_layer;
     uint32_t n_rot;
-    uint32_t n_swa = 0; // sliding window attention (SWA)
-    uint32_t n_swa_pattern = 1; // by default, all layers use non-sliding-window attention
     uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
     uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
     uint32_t n_expert = 0;
@@ -96,6 +100,15 @@ struct llama_hparams {
 
     std::array<int, 4> rope_sections;
 
+    // Sliding Window Attention (SWA)
+    llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;
+    // the size of the sliding window (0 - no SWA)
+    uint32_t n_swa = 0;
+    // if swa_layers[il] == true, then layer il is SWA
+    // if swa_layers[il] == false, then layer il is dense (i.e. non-SWA)
+    // by default, all layers are dense
+    std::array<bool, LLAMA_MAX_LAYERS> swa_layers;
+
     // for State Space Models
     uint32_t ssm_d_conv = 0;
     uint32_t ssm_d_inner = 0;
@@ -116,11 +129,10 @@ struct llama_hparams {
     bool causal_attn = true;
     bool use_alibi = false;
     bool attn_soft_cap = false;
+    bool use_kq_norm = true;
 
+    // llama4
     uint32_t n_moe_layer_step = 0;
-    bool use_kq_norm = true;
-    uint32_t n_attn_chunk = 0;
-    // values below seems to be fixed on llama4
     uint32_t n_no_rope_layer_step = 4;
     uint32_t n_attn_temp_floor_scale = 8192;
     float f_attn_temp_scale = 0.1;
@@ -133,6 +145,23 @@ struct llama_hparams {
     enum llama_rope_type rope_type = LLAMA_ROPE_TYPE_NONE;
     enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;
 
+    // this value n_pattern means that every nth layer is dense (i.e. non-SWA)
+    // note that if n_pattern == 0, all layers are SWA
+    // if n_pattern == 1, all layers are dense
+    // example: n_pattern = 3
+    //   il == 0: swa
+    //   il == 1: swa
+    //   il == 2: dense
+    //   il == 3: swa
+    //   il == 4: swa
+    //   il == 5: dense
+    //   il == 6: swa
+    //   etc ...
+    void set_swa_pattern(uint32_t n_pattern);
+
+    // return true if one of the layers is SWA
+    bool is_swa_any() const;
+
     uint32_t n_head(uint32_t il = 0) const;
 
     uint32_t n_head_kv(uint32_t il = 0) const;
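The comment on set_swa_pattern above fully specifies the layer layout. A minimal sketch of how the method could fill swa_layers under that rule (an assumption for illustration, not necessarily the shipped implementation):

    // Minimal sketch: mark every layer as SWA except each n_pattern-th layer,
    // per the comment above (n_pattern == 0 -> all SWA, n_pattern == 1 -> all dense).
    void llama_hparams::set_swa_pattern(uint32_t n_pattern) {
        for (uint32_t il = 0; il < n_layer; ++il) {
            swa_layers[il] = n_pattern == 0 || (il % n_pattern < (n_pattern - 1));
        }
    }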
llama-kv-cache.h

@@ -8,6 +8,7 @@
 #include "ggml-cpp.h"
 
 #include <set>
+#include <unordered_map>
 #include <vector>
 
 struct llama_cparams;
@@ -40,6 +41,9 @@ struct llama_kv_cache : public llama_memory_i {
     // batch processing
     //
 
+    // =============================================================================================================
+    // TODO: refactor and simplify this
+
     virtual llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) = 0;
 
     // different KV caches require different batch splitting strategies
@@ -48,11 +52,10 @@ struct llama_kv_cache : public llama_memory_i {
     // find an empty slot of size "n_tokens" in the cache
     virtual bool find_slot(const llama_ubatch & batch) = 0;
 
+    // =============================================================================================================
+
     // getters
-    virtual int32_t get_n_tokens() const = 0;
-    virtual int32_t get_used_cells() const = 0; // TODO: remove, this is too-specific to the unified cache
-    virtual llama_pos get_pos_max() const = 0;
-    virtual bool get_can_shift() const = 0;
+    virtual bool get_can_shift() const = 0;
 
     bool get_can_edit() const override { return get_can_shift(); }
 
@@ -87,38 +90,25 @@ private:
 // llama_kv_cache_unified
 //
 
-// TODO: add notion of max sequences
 class llama_kv_cache_unified : public llama_kv_cache {
 public:
-    struct kv_cell {
-        llama_pos pos = -1;
-        llama_pos delta = 0;
-
-        std::set<llama_seq_id> seq_id;
-
-        bool has_seq_id(const llama_seq_id & id) const {
-            return seq_id.find(id) != seq_id.end();
-        }
-
-        bool is_empty() const {
-            return seq_id.empty();
-        }
-
-        bool is_same_seq(const kv_cell & other) const {
-            return seq_id == other.seq_id;
-        }
-    };
-
     static uint32_t get_padding(const llama_cparams & cparams);
 
+    // this callback is used to filter out layers that should not be included in the cache
+    using layer_filter_cb = std::function<bool(int32_t il)>;
+
     llama_kv_cache_unified(
-            const llama_model & model,
-            lm_ggml_type        type_k,
-            lm_ggml_type        type_v,
-            bool                v_trans,
-            bool                offload,
-            uint32_t            kv_size,
-            uint32_t            padding);
+            const llama_model & model,
+            layer_filter_cb  && filter,
+            lm_ggml_type        type_k,
+            lm_ggml_type        type_v,
+            bool                v_trans,
+            bool                offload,
+            uint32_t            kv_size,
+            uint32_t            n_seq_max,
+            uint32_t            n_pad,
+            uint32_t            n_swa,
+            llama_swa_type      swa_type);
 
     ~llama_kv_cache_unified() = default;
 
@@ -130,10 +120,11 @@ public:
 
     bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
     void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
-    void seq_keep(llama_seq_id seq_id)                                              override;
+    void seq_keep(llama_seq_id seq_id) override;
     void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
     void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
 
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
     llama_pos seq_pos_max(llama_seq_id seq_id) const override;
 
     //
@@ -150,7 +141,6 @@ public:
     void set_full() override;
 
     llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
-
     llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
 
     // updates the cache head
@@ -158,53 +148,106 @@ public:
     // to the first cell of the slot.
     bool find_slot(const llama_ubatch & batch) override;
 
-    int32_t get_n_tokens() const override;
-    int32_t get_used_cells() const override;
-
-    // TODO: better data structures to reduce the cost of this operation
-    llama_pos get_pos_max() const override;
-
     bool get_can_shift() const override;
 
     // state write/load
 
     void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
-    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1)       override;
+    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1) override;
 
-    // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_impl also uses it, so it
-    // cannot be freely changed after a slot has been allocated.
-    uint32_t head = 0;
-    uint32_t size = 0;
-    uint32_t used = 0; // used cells (i.e. at least one seq_id)
+    //
+    // llama_kv_cache_unified specific API
+    //
 
-    // computed before each graph build
-    uint32_t n = 0;
+    uint32_t get_n() const;
+    uint32_t get_size() const;
 
-    std::vector<kv_cell> cells;
+    // get views of the current state of the cache
+    lm_ggml_tensor * get_k(lm_ggml_context * ctx, int32_t il) const;
+    lm_ggml_tensor * get_v(lm_ggml_context * ctx, int32_t il) const;
 
-    std::vector<lm_ggml_tensor *> k_l; // per layer
-    std::vector<lm_ggml_tensor *> v_l;
+    // store k_cur and v_cur in the cache based on the current head location
+    lm_ggml_tensor * cpy_k(lm_ggml_context * ctx, lm_ggml_tensor * k_cur, int32_t il) const;
+    lm_ggml_tensor * cpy_v(lm_ggml_context * ctx, lm_ggml_tensor * v_cur, int32_t il) const;
+
+    void prune_swa(llama_seq_id seq_id, llama_pos pmin, llama_pos pmax);
+
+    void set_input_kq_mask   (lm_ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
+    void set_input_k_shift   (lm_ggml_tensor * dst) const;
+    void set_input_pos_bucket(lm_ggml_tensor * dst, const llama_ubatch * ubatch) const;
 
 private:
     const llama_model & model;
     const llama_hparams & hparams;
 
+    struct kv_cell {
+        llama_pos pos   = -1;
+        llama_pos delta =  0;
+
+        // TODO: replace with bitset uint64_t
+        std::set<llama_seq_id> seq_id;
+
+        bool has_seq_id(const llama_seq_id & id) const {
+            return seq_id.find(id) != seq_id.end();
+        }
+
+        bool is_empty() const {
+            return seq_id.empty();
+        }
+
+        bool is_same_seq(const kv_cell & other) const {
+            return seq_id == other.seq_id;
+        }
+    };
+
+    struct kv_layer {
+        // layer index in the model
+        // note: can be different from the layer index in the KV cache
+        uint32_t il;
+
+        lm_ggml_tensor * k;
+        lm_ggml_tensor * v;
+    };
+
     bool has_shift = false;
     bool do_defrag = false;
-
     bool v_trans = true; // the value tensor is transposed
-
+
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+    uint32_t size = 0; // total number of cells, shared across all sequences
+    uint32_t used = 0; // used cells (i.e. at least one seq_id) (TODO: add `struct kv_cells` and keep track automaticallt)
+
+    // computed before each graph build
+    uint32_t n = 0;
+
+    const uint32_t n_seq_max = 1;
 
     // required padding
-    uint32_t padding = 1;
+    const uint32_t n_pad = 1;
+
+    // SWA
+    const uint32_t n_swa = 0;
 
-    lm_ggml_type type_k = LM_GGML_TYPE_F16;
-    lm_ggml_type type_v = LM_GGML_TYPE_F16;
+    const llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;
 
     std::vector<lm_ggml_context_ptr> ctxs;
     std::vector<lm_ggml_backend_buffer_ptr> bufs;
 
+    std::vector<kv_cell>  cells;  // TODO: replace with `struct kv_cells`
+    std::vector<kv_layer> layers;
+
+    // model layer id -> KV cache layer id
+    std::unordered_map<int32_t, int32_t> map_layer_ids;
+
+    // recovery information used to restore the KV cells to their original state in case of a failure
+    struct {
+        void clear() {
+            cells.clear();
+        }
+
+        std::unordered_map<uint32_t, kv_cell> cells;
+    } recovery;
+
     // defrag
     struct {
         std::vector<uint32_t> ids;
@@ -213,17 +256,6 @@ private:
     // return true if cells have been moved
     bool defrag_prepare(int32_t n_max_nodes);
 
-    // commit/restore cache
-    struct slot_range {
-        uint32_t c0 = 0; // note: these are cell indices, not sequence positions
-        uint32_t c1 = 0;
-    };
-
-    // pending cell updates that are not yet committed
-    struct {
-        std::vector<slot_range> ranges;
-    } pending;
-
     // find how many cells are currently in use
     uint32_t cell_max() const;
 
@@ -232,6 +264,8 @@ private:
     size_t size_k_bytes() const;
     size_t size_v_bytes() const;
 
+    bool is_masked_swa(llama_pos p0, llama_pos p1) const;
+
     lm_ggml_tensor * build_rope_shift(
             const llama_cparams & cparams,
             lm_ggml_context * ctx,
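is_masked_swa is only declared here. A plausible reading of the two llama_swa_type variants introduced in llama-hparams.h (a fixed trailing window vs. the chunk-aligned window used by Llama 4-style chunked attention) is sketched below; this is an assumption, not the package's actual body:

    // Sketch (assumed semantics): true when key position p0 is masked out for
    // query position p1 under the configured sliding-window variant.
    bool llama_kv_cache_unified::is_masked_swa(llama_pos p0, llama_pos p1) const {
        switch (swa_type) {
            case LLAMA_SWA_TYPE_NONE:
                return false;
            case LLAMA_SWA_TYPE_STANDARD:
                // keep only the last n_swa positions before the query
                return p1 - p0 >= (llama_pos) n_swa;
            case LLAMA_SWA_TYPE_CHUNKED:
                // keep only positions inside the query's n_swa-sized chunk
                return p0 < (p1 / (llama_pos) n_swa) * (llama_pos) n_swa;
        }
        return false;
    }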
@@ -258,6 +292,100 @@ private:
     bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
 };
 
+//
+// llama_kv_cache_unified_iswa
+//
+
+// utilizes two instances of llama_kv_cache_unified
+// the first instance is for the non-SWA layers of the model and the second instance is for the SWA layers
+// upon successful commit, the SWA cache removes old tokens outside the n_swa window
+
+class llama_kv_cache_unified_iswa : public llama_kv_cache {
+public:
+    llama_kv_cache_unified_iswa(
+            const llama_model & model,
+            lm_ggml_type        type_k,
+            lm_ggml_type        type_v,
+            bool                v_trans,
+            bool                offload,
+            bool                swa_full,
+            uint32_t            kv_size,
+            uint32_t            n_seq_max,
+            uint32_t            n_batch,
+            uint32_t            n_pad);
+
+    ~llama_kv_cache_unified_iswa() = default;
+
+    //
+    // llama_memory_i
+    //
+
+    void clear() override;
+
+    bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    //
+    // llama_kv_cache
+    //
+
+    void restore() override;
+    void commit() override;
+
+    bool update(llama_context & ctx) override;
+
+    void defrag_sched(float thold) override;
+
+    void set_full() override;
+
+    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
+
+    bool find_slot(const llama_ubatch & batch) override;
+
+    bool get_can_shift() const override;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1) override;
+
+    //
+    // llama_kv_cache_unified_iswa specific API
+    //
+
+    llama_kv_cache_unified * get_kv_base() const;
+    llama_kv_cache_unified * get_kv_swa () const;
+
+private:
+    const llama_hparams & hparams;
+
+    bool do_prune = true;
+
+    struct {
+        struct entry {
+            llama_pos pmin;
+            llama_pos pmax;
+        };
+
+        void clear() {
+            pos.clear();
+        }
+
+        // used to perform SWA pruning of old tokens
+        std::unordered_map<llama_seq_id, entry> pos;
+    } pending;
+
+    std::unique_ptr<llama_kv_cache_unified> kv_base;
+    std::unique_ptr<llama_kv_cache_unified> kv_swa;
+};
+
 //
 // llama_kv_cache_recurrent
 //
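The comments above describe the two-cache layout: dense layers can be routed to one llama_kv_cache_unified and SWA layers to the other via the layer_filter_cb introduced earlier. A simplified sketch of such a construction (sizes and details are assumptions, not the shipped code):

    // Simplified sketch of the dense/SWA split inside the iSWA constructor.
    const uint32_t size_swa = swa_full ? kv_size : hparams.n_swa * n_seq_max + n_batch; // rough bound, illustrative

    kv_base = std::make_unique<llama_kv_cache_unified>(
            model, [&](int32_t il) { return !hparams.swa_layers[il]; }, // dense layers
            type_k, type_v, v_trans, offload, kv_size, n_seq_max, n_pad,
            0, LLAMA_SWA_TYPE_NONE);

    kv_swa = std::make_unique<llama_kv_cache_unified>(
            model, [&](int32_t il) { return  hparams.swa_layers[il]; }, // SWA layers
            type_k, type_v, v_trans, offload, size_swa, n_seq_max, n_pad,
            hparams.n_swa, hparams.swa_type);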
@@ -289,7 +417,8 @@ public:
             lm_ggml_type type_k,
             lm_ggml_type type_v,
             bool offload,
-            uint32_t kv_size);
+            uint32_t kv_size,
+            uint32_t n_seq_max);
 
     ~llama_kv_cache_recurrent() = default;
 
@@ -301,10 +430,11 @@ public:
 
     bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
     void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
-    void seq_keep(llama_seq_id seq_id)                                              override;
+    void seq_keep(llama_seq_id seq_id) override;
    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
     void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
 
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
     llama_pos seq_pos_max(llama_seq_id seq_id) const override;
 
     //
@@ -314,24 +444,17 @@ public:
     void restore() override;
     void commit() override;
 
-    bool update(llama_context & lctx) override;
+    bool update(llama_context & ctx) override;
 
     void defrag_sched(float thold) override;
 
     void set_full() override;
 
     llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
-
     llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
 
     bool find_slot(const llama_ubatch & batch) override;
 
-    int32_t get_n_tokens() const override;
-    int32_t get_used_cells() const override;
-
-    // TODO: better data structures to reduce the cost of this operation
-    llama_pos get_pos_max() const override;
-
     bool get_can_shift() const override;
 
     // TODO: temporary methods - they are not really const as they do const_cast<>, fix this
@@ -343,11 +466,8 @@ public:
     void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
     void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
 
-    // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_impl also uses it, so it
-    // cannot be freely changed after a slot has been allocated.
-    uint32_t head = 0;
-    uint32_t size = 0;
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+    uint32_t size = 0; // total number of cells, shared across all sequences
     uint32_t used = 0; // used cells (i.e. at least one seq_id)
 
     // computed before each graph build
@@ -374,8 +494,7 @@ private:
         std::vector<slot_range> ranges;
     } pending;
 
-    lm_ggml_type type_k = LM_GGML_TYPE_F16;
-    lm_ggml_type type_v = LM_GGML_TYPE_F16;
+    const uint32_t n_seq_max = 1;
 
     std::vector<lm_ggml_context_ptr> ctxs;
     std::vector<lm_ggml_backend_buffer_ptr> bufs;
@@ -394,12 +513,3 @@ private:
     bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
     bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
 };
-
-
-//
-// kv cache view
-//
-
-llama_kv_cache_view llama_kv_cache_view_init(const llama_kv_cache & kv, int32_t n_seq_max);
-
-void llama_kv_cache_view_update(llama_kv_cache_view * view, const llama_kv_cache * kv);
llama-memory.h

@@ -7,8 +7,8 @@ struct llama_memory_params {
     lm_ggml_type type_k;
     lm_ggml_type type_v;
 
-    // parameters for other types of memory
-    // ...
+    // use full-size SWA cache
+    bool swa_full;
 };
 
 // general concept of LLM memory
@@ -25,6 +25,7 @@ public:
     virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) = 0;
     virtual void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) = 0;
 
+    virtual llama_pos seq_pos_min(llama_seq_id seq_id) const = 0;
     virtual llama_pos seq_pos_max(llama_seq_id seq_id) const = 0;
 
     virtual bool get_can_edit() const = 0;
|