cactus-react-native 0.0.1 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.txt +20 -0
- package/README.md +3 -1
- package/android/src/main/CMakeLists.txt +58 -23
- package/android/src/main/java/com/cactus/Cactus.java +484 -16
- package/android/src/main/java/com/cactus/LlamaContext.java +199 -0
- package/android/src/main/jni.cpp +325 -10
- package/android/src/main/jniLibs/arm64-v8a/libcactus.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus_v8.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_dotprod.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_dotprod_i8mm.so +0 -0
- package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_i8mm.so +0 -0
- package/android/src/main/jniLibs/x86_64/libcactus.so +0 -0
- package/android/src/main/jniLibs/x86_64/libcactus_x86_64.so +0 -0
- package/android/src/newarch/java/com/cactus/CactusModule.java +79 -7
- package/android/src/oldarch/java/com/cactus/CactusModule.java +70 -0
- package/cactus-react-native.podspec +0 -3
- package/ios/CMakeLists.txt +58 -36
- package/ios/Cactus.mm +243 -2
- package/ios/CactusContext.h +22 -0
- package/ios/CactusContext.mm +176 -1
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/cactus.h +92 -5
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/cactus_ffi.h +268 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/chat.h +2 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/common.h +42 -51
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-backend.h +4 -4
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-common.h +12 -6
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpp.h +1 -1
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu.h +5 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-impl.h +52 -18
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-metal-impl.h +106 -14
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-opt.h +49 -28
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml.h +87 -106
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-arch.h +16 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-batch.h +2 -1
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-chat.h +7 -2
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-context.h +44 -33
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-cparams.h +1 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-graph.h +83 -17
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-hparams.h +44 -2
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-kv-cache.h +407 -179
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-memory.h +13 -2
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-model-loader.h +5 -3
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-model-saver.h +37 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-model.h +24 -2
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-vocab.h +6 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama.h +102 -142
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/minja/chat-template.hpp +23 -11
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/minja/minja.hpp +186 -127
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Info.plist +0 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/cactus +0 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/ggml-llama.metallib +0 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/cactus.h +92 -5
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/cactus_ffi.h +268 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/chat.h +2 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/common.h +42 -51
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-backend.h +4 -4
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-common.h +12 -6
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpp.h +1 -1
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu.h +5 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-impl.h +52 -18
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-metal-impl.h +106 -14
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-opt.h +49 -28
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml.h +87 -106
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-arch.h +16 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-batch.h +2 -1
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-chat.h +7 -2
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-context.h +44 -33
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-cparams.h +1 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-graph.h +83 -17
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-hparams.h +44 -2
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-kv-cache.h +407 -179
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-memory.h +13 -2
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-model-loader.h +5 -3
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-model-saver.h +37 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-model.h +24 -2
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-vocab.h +6 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama.h +102 -142
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/minja/chat-template.hpp +23 -11
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/minja/minja.hpp +186 -127
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Info.plist +0 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/_CodeSignature/CodeResources +1 -1
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/cactus +0 -0
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/ggml-llama-sim.metallib +0 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/cactus.h +92 -5
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/cactus_ffi.h +268 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/chat.h +2 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/common.h +42 -51
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-backend.h +4 -4
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-common.h +12 -6
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpp.h +1 -1
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu.h +5 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-impl.h +52 -18
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-metal-impl.h +106 -14
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-opt.h +49 -28
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml.h +87 -106
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-arch.h +16 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-batch.h +2 -1
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-chat.h +7 -2
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-context.h +44 -33
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-cparams.h +1 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-graph.h +83 -17
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-hparams.h +44 -2
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-kv-cache.h +407 -179
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-memory.h +13 -2
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-model-loader.h +5 -3
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-model-saver.h +37 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-model.h +24 -2
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-vocab.h +6 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama.h +102 -142
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/minja/chat-template.hpp +23 -11
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/minja/minja.hpp +186 -127
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Info.plist +0 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/cactus +0 -0
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/ggml-llama.metallib +0 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/cactus.h +92 -5
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/cactus_ffi.h +268 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/chat.h +2 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/common.h +42 -51
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-backend.h +4 -4
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-common.h +12 -6
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpp.h +1 -1
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu.h +5 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-impl.h +52 -18
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-metal-impl.h +106 -14
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-opt.h +49 -28
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml.h +87 -106
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-arch.h +16 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-batch.h +2 -1
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-chat.h +7 -2
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-context.h +44 -33
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-cparams.h +1 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-graph.h +83 -17
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-hparams.h +44 -2
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-kv-cache.h +407 -179
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-memory.h +13 -2
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-model-loader.h +5 -3
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-model-saver.h +37 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-model.h +24 -2
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-vocab.h +6 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama.h +102 -142
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/minja/chat-template.hpp +23 -11
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/minja/minja.hpp +186 -127
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Info.plist +0 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/_CodeSignature/CodeResources +1 -1
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/cactus +0 -0
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/ggml-llama-sim.metallib +0 -0
- package/lib/commonjs/NativeCactus.js +1 -0
- package/lib/commonjs/NativeCactus.js.map +1 -1
- package/lib/commonjs/index.js +112 -0
- package/lib/commonjs/index.js.map +1 -1
- package/lib/commonjs/tools.js +118 -0
- package/lib/commonjs/tools.js.map +1 -0
- package/lib/module/NativeCactus.js +3 -0
- package/lib/module/NativeCactus.js.map +1 -1
- package/lib/module/index.js +87 -1
- package/lib/module/index.js.map +1 -1
- package/lib/module/tools.js +110 -0
- package/lib/module/tools.js.map +1 -0
- package/lib/typescript/NativeCactus.d.ts +30 -1
- package/lib/typescript/NativeCactus.d.ts.map +1 -1
- package/lib/typescript/index.d.ts +21 -2
- package/lib/typescript/index.d.ts.map +1 -1
- package/lib/typescript/tools.d.ts +38 -0
- package/lib/typescript/tools.d.ts.map +1 -0
- package/package.json +6 -3
- package/src/NativeCactus.ts +62 -1
- package/src/index.ts +113 -2
- package/src/tools.ts +127 -0
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-impl.h +0 -531
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/sgemm.h +0 -14
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-impl.h +0 -531
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/sgemm.h +0 -14
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-impl.h +0 -531
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/sgemm.h +0 -14
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-aarch64.h +0 -8
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-impl.h +0 -531
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-quants.h +0 -63
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-traits.h +0 -38
- package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/sgemm.h +0 -14
package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-kv-cache.h
CHANGED
```diff
@@ -2,174 +2,289 @@
 
 #include "llama.h"
 #include "llama-io.h"
+#include "llama-graph.h"
 #include "llama-memory.h"
 
 #include "ggml-cpp.h"
 
-#include <functional>
 #include <set>
+#include <unordered_map>
 #include <vector>
 
 struct llama_cparams;
 struct llama_hparams;
 struct llama_ubatch;
+struct llama_sbatch;
+struct llama_model;
+struct llama_context;
 
 struct llama_kv_cache : public llama_memory_i {
-
+    virtual ~llama_kv_cache() = default;
 
-
-    virtual
+    // call if batch processing fails - restores the cache state
+    virtual void restore() = 0;
 
+    // call after successful batch processing - clears any pending state
+    virtual void commit() = 0;
+
+    // process any pending defrag/shift/etc. operations
+    // optionally call once before processing a new batch
+    virtual bool update(llama_context & lctx) = 0;
+
+    // schedule a defrag if the fragmentation threshold is exceeded. otherwise, do nothing
+    virtual void defrag_sched(float thold) = 0;
+
+    // simulate full cache, used for allocating worst-case compute buffers
+    virtual void set_full() = 0;
+
+    //
+    // batch processing
+    //
+
+    // =============================================================================================================
+    // TODO: refactor and simplify this
+
+    virtual llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) = 0;
+
+    // different KV caches require different batch splitting strategies
+    virtual llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const = 0;
+
+    // find an empty slot of size "n_tokens" in the cache
+    virtual bool find_slot(const llama_ubatch & batch) = 0;
+
+    // =============================================================================================================
+
+    // getters
     virtual bool get_can_shift() const = 0;
 
     bool get_can_edit() const override { return get_can_shift(); }
+
+    //
+    // state write/read
+    //
+
+    virtual void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const = 0;
+    virtual void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) = 0;
 };
 
-
-
-
-    int32_t src = -1; // used by recurrent state models to copy states
-    int32_t tail = -1;
+//
+// llama_kv_cache_guard
+//
 
-
+struct llama_kv_cache_guard {
+    llama_kv_cache_guard(llama_kv_cache * kv) : kv(kv) {}
 
-
-
+    ~llama_kv_cache_guard() {
+        kv->restore();
     }
 
-
-
+    void commit() {
+        kv->commit();
     }
 
-
-
-    }
+private:
+    llama_kv_cache * kv;
 };
 
-//
-
-
-    bool found = false; // the slot was found
-
-    explicit llama_kv_cache_slot_info(bool found_) : found{found_} {}
-    llama_kv_cache_slot_info(uint32_t begin, uint32_t end) : boundaries{begin, end}, found{true} {}
-
-    operator bool() const { return found; }
-};
+//
+// llama_kv_cache_unified
+//
 
-// ring-buffer of cached KV data
-// TODO: pimpl
-// TODO: add notion of max sequences
 class llama_kv_cache_unified : public llama_kv_cache {
 public:
-
-    struct callbacks {
-        std::function<lm_ggml_tensor * (uint32_t n_ctx_per_seq, int il)> get_rope_factors;
-    };
-
-    llama_kv_cache_unified(
-            const llama_hparams & hparams,
-            callbacks cbs);
+    static uint32_t get_padding(const llama_cparams & cparams);
 
-
+    // this callback is used to filter out layers that should not be included in the cache
+    using layer_filter_cb = std::function<bool(int32_t il)>;
 
-
-
-
-
-    lm_ggml_type
-
-
-
-
-
-
-
-
-
-
-
+    llama_kv_cache_unified(
+            const llama_model & model,
+            layer_filter_cb && filter,
+            lm_ggml_type type_k,
+            lm_ggml_type type_v,
+            bool v_trans,
+            bool offload,
+            uint32_t kv_size,
+            uint32_t n_seq_max,
+            uint32_t n_pad,
+            uint32_t n_swa,
+            llama_swa_type swa_type);
+
+    ~llama_kv_cache_unified() = default;
+
+    //
+    // llama_memory_i
+    //
 
     void clear() override;
-    void defrag() override;
 
     bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
    void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
-    void seq_keep(llama_seq_id seq_id)
+    void seq_keep(llama_seq_id seq_id) override;
     void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
     void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
 
-    llama_pos
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
 
-
+    //
+    // llama_kv_cache
+    //
+
+    void restore() override;
+    void commit() override;
+
+    bool update(llama_context & ctx) override;
+
+    void defrag_sched(float thold) override;
+
+    void set_full() override;
+
+    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
 
-    // find an empty slot of size "n_tokens" in the cache
     // updates the cache head
-    // returns a structure holding information about the slot found
     // Note: On success, it's important that cache.head points
     // to the first cell of the slot.
-
+    bool find_slot(const llama_ubatch & batch) override;
 
-
-    uint32_t get_padding(const llama_cparams & cparams) const;
+    bool get_can_shift() const override;
 
-    //
-    uint32_t cell_max() const;
+    // state write/load
 
-
-
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
 
-    //
+    //
+    // llama_kv_cache_unified specific API
+    //
 
-
-
-    } defrag_info;
+    uint32_t get_n() const;
+    uint32_t get_size() const;
 
-    //
-
+    // get views of the current state of the cache
+    lm_ggml_tensor * get_k(lm_ggml_context * ctx, int32_t il) const;
+    lm_ggml_tensor * get_v(lm_ggml_context * ctx, int32_t il) const;
 
-    //
+    // store k_cur and v_cur in the cache based on the current head location
+    lm_ggml_tensor * cpy_k(lm_ggml_context * ctx, lm_ggml_tensor * k_cur, int32_t il) const;
+    lm_ggml_tensor * cpy_v(lm_ggml_context * ctx, lm_ggml_tensor * v_cur, int32_t il) const;
 
-    void
-    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1);
+    void prune_swa(llama_seq_id seq_id, llama_pos pmin, llama_pos pmax);
 
-
+    void set_input_kq_mask (lm_ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
+    void set_input_k_shift (lm_ggml_tensor * dst) const;
+    void set_input_pos_bucket(lm_ggml_tensor * dst, const llama_ubatch * ubatch) const;
 
+private:
+    const llama_model & model;
     const llama_hparams & hparams;
 
-
+    struct kv_cell {
+        llama_pos pos = -1;
+        llama_pos delta = 0;
 
-
-
+        // TODO: replace with bitset uint64_t
+        std::set<llama_seq_id> seq_id;
+
+        bool has_seq_id(const llama_seq_id & id) const {
+            return seq_id.find(id) != seq_id.end();
+        }
+
+        bool is_empty() const {
+            return seq_id.empty();
+        }
+
+        bool is_same_seq(const kv_cell & other) const {
+            return seq_id == other.seq_id;
+        }
+    };
 
-
-
+    struct kv_layer {
+        // layer index in the model
+        // note: can be different from the layer index in the KV cache
+        uint32_t il;
 
+        lm_ggml_tensor * k;
+        lm_ggml_tensor * v;
+    };
+
+    bool has_shift = false;
+    bool do_defrag = false;
     bool v_trans = true; // the value tensor is transposed
-    bool can_shift = false;
 
-    //
-
-    //
-    uint32_t head = 0;
-    uint32_t size = 0;
-    uint32_t used = 0; // used cells (i.e. at least one seq_id)
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+    uint32_t size = 0; // total number of cells, shared across all sequences
+    uint32_t used = 0; // used cells (i.e. at least one seq_id) (TODO: add `struct kv_cells` and keep track automaticallt)
 
     // computed before each graph build
     uint32_t n = 0;
 
-
+    const uint32_t n_seq_max = 1;
 
-
-
+    // required padding
+    const uint32_t n_pad = 1;
 
-
-
-
+    // SWA
+    const uint32_t n_swa = 0;
+
+    const llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;
 
     std::vector<lm_ggml_context_ptr> ctxs;
     std::vector<lm_ggml_backend_buffer_ptr> bufs;
 
+    std::vector<kv_cell> cells; // TODO: replace with `struct kv_cells`
+    std::vector<kv_layer> layers;
+
+    // model layer id -> KV cache layer id
+    std::unordered_map<int32_t, int32_t> map_layer_ids;
+
+    // recovery information used to restore the KV cells to their original state in case of a failure
+    struct {
+        void clear() {
+            cells.clear();
+        }
+
+        std::unordered_map<uint32_t, kv_cell> cells;
+    } recovery;
+
+    // defrag
+    struct {
+        std::vector<uint32_t> ids;
+    } defrag_info;
+
+    // return true if cells have been moved
+    bool defrag_prepare(int32_t n_max_nodes);
+
+    // find how many cells are currently in use
+    uint32_t cell_max() const;
+
+    size_t total_size() const;
+
+    size_t size_k_bytes() const;
+    size_t size_v_bytes() const;
+
+    bool is_masked_swa(llama_pos p0, llama_pos p1) const;
+
+    lm_ggml_tensor * build_rope_shift(
+            const llama_cparams & cparams,
+            lm_ggml_context * ctx,
+            lm_ggml_tensor * cur,
+            lm_ggml_tensor * shift,
+            lm_ggml_tensor * factors,
+            float freq_base,
+            float freq_scale) const;
+
+    llm_graph_result_ptr build_graph_shift(
+            const llama_cparams & cparams,
+            lm_ggml_context * ctx,
+            lm_ggml_cgraph * gf) const;
+
+    llm_graph_result_ptr build_graph_defrag(
+            const llama_cparams & cparams,
+            lm_ggml_context * ctx,
+            lm_ggml_cgraph * gf) const;
+
     void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
     void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
 
@@ -177,111 +292,224 @@ private:
     bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
 };
 
-// TODO: temporary reusing llama_kv_cache_unified -- implement recurrent cache and simplify llama_kv_cache_unified
-//class llama_kv_cache_recurrent : public llama_kv_cache_unified {
-//public:
-//    using llama_kv_cache_unified::llama_kv_cache_unified;
-//};
-
 //
-//
+// llama_kv_cache_unified_iswa
 //
 
-//
-//
-
-struct llama_kv_cache_state {
-    uint32_t head = 0;
-    uint32_t n = 0;
-} old_state;
+// utilizes two instances of llama_kv_cache_unified
+// the first instance is for the non-SWA layers of the model and the second instance is for the SWA layers
+// upon successful commit, the SWA cache removes old tokens outside the n_swa window
 
-
-
-
+class llama_kv_cache_unified_iswa : public llama_kv_cache {
+public:
+    llama_kv_cache_unified_iswa(
+            const llama_model & model,
+            lm_ggml_type type_k,
+            lm_ggml_type type_v,
+            bool v_trans,
+            bool offload,
+            bool swa_full,
+            uint32_t kv_size,
+            uint32_t n_seq_max,
+            uint32_t n_batch,
+            uint32_t n_pad);
 
-
+    ~llama_kv_cache_unified_iswa() = default;
 
-
+    //
+    // llama_memory_i
+    //
 
-
-        old_state.head = cache.head;
-        old_state.n = cache.n;
-    }
+    void clear() override;
 
-
-    void
-
-
-
-            slot_boundaries.push_back(slot.boundaries);
-        }
-    }
-}
+    bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-            }
-        }
-    }
-};
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    //
+    // llama_kv_cache
+    //
+
+    void restore() override;
+    void commit() override;
+
+    bool update(llama_context & ctx) override;
+
+    void defrag_sched(float thold) override;
 
-
-int32_t llama_kv_cache_n_tokens(const llama_kv_cache * kv);
+    void set_full() override;
 
-
+    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
 
-
+    bool find_slot(const llama_ubatch & batch) override;
 
-bool
-
-
-        llama_pos p0,
-        llama_pos p1);
+    bool get_can_shift() const override;
+
+    // state write/load
 
-void
-
-        llama_seq_id seq_id_src,
-        llama_seq_id seq_id_dst,
-        llama_pos p0,
-        llama_pos p1);
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
 
-
+    //
+    // llama_kv_cache_unified_iswa specific API
+    //
 
-
-
-        llama_seq_id seq_id,
-        llama_pos p0,
-        llama_pos p1,
-        llama_pos delta);
+    llama_kv_cache_unified * get_kv_base() const;
+    llama_kv_cache_unified * get_kv_swa () const;
 
-
-
-        llama_seq_id seq_id,
-        llama_pos p0,
-        llama_pos p1,
-        int d);
+private:
+    const llama_hparams & hparams;
 
-
+    bool do_prune = true;
 
-
+    struct {
+        struct entry {
+            llama_pos pmin;
+            llama_pos pmax;
+        };
 
-
+        void clear() {
+            pos.clear();
+        }
+
+        // used to perform SWA pruning of old tokens
+        std::unordered_map<llama_seq_id, entry> pos;
+    } pending;
+
+    std::unique_ptr<llama_kv_cache_unified> kv_base;
+    std::unique_ptr<llama_kv_cache_unified> kv_swa;
+};
 
 //
-//
+// llama_kv_cache_recurrent
 //
 
-
+class llama_kv_cache_recurrent : public llama_kv_cache {
+public:
+    struct kv_cell {
+        llama_pos pos = -1;
+        int32_t src = -1; // used to copy states
+        int32_t tail = -1;
+
+        std::set<llama_seq_id> seq_id;
+
+        bool has_seq_id(const llama_seq_id & id) const {
+            return seq_id.find(id) != seq_id.end();
+        }
+
+        bool is_empty() const {
+            return seq_id.empty();
+        }
+
+        bool is_same_seq(const kv_cell & other) const {
+            return seq_id == other.seq_id;
+        }
+    };
+
+    llama_kv_cache_recurrent(
+            const llama_model & model,
+            lm_ggml_type type_k,
+            lm_ggml_type type_v,
+            bool offload,
+            uint32_t kv_size,
+            uint32_t n_seq_max);
+
+    ~llama_kv_cache_recurrent() = default;
+
+    //
+    // llama_memory_i
+    //
 
-void
+    void clear() override;
+
+    bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    //
+    // llama_kv_cache
+    //
+
+    void restore() override;
+    void commit() override;
+
+    bool update(llama_context & ctx) override;
+
+    void defrag_sched(float thold) override;
+
+    void set_full() override;
+
+    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
+
+    bool find_slot(const llama_ubatch & batch) override;
+
+    bool get_can_shift() const override;
+
+    // TODO: temporary methods - they are not really const as they do const_cast<>, fix this
+    int32_t s_copy(int i) const;
+    float s_mask(int i) const;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+    uint32_t size = 0; // total number of cells, shared across all sequences
+    uint32_t used = 0; // used cells (i.e. at least one seq_id)
+
+    // computed before each graph build
+    uint32_t n = 0;
+
+    std::vector<kv_cell> cells;
+
+    std::vector<lm_ggml_tensor *> k_l; // per layer
+    std::vector<lm_ggml_tensor *> v_l;
+
+private:
+    //const llama_model & model;
+    const llama_hparams & hparams;
+
+    // commit/restore cache
+    // TODO: rework for recurrent cache
+    struct slot_range {
+        uint32_t c0 = 0; // note: these are cell indices, not sequence positions
+        uint32_t c1 = 0;
+    };
+
+    // pending cell updates that are not yet committed
+    struct {
+        std::vector<slot_range> ranges;
+    } pending;
+
+    const uint32_t n_seq_max = 1;
+
+    std::vector<lm_ggml_context_ptr> ctxs;
+    std::vector<lm_ggml_backend_buffer_ptr> bufs;
+
+    // find how many cells are currently in use
+    uint32_t cell_max() const;
+
+    size_t total_size() const;
+
+    size_t size_k_bytes() const;
+    size_t size_v_bytes() const;
+
+    void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
+    void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
+
+    bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
+    bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
+};
```
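The most consequential change in this header is the new transactional contract on `llama_kv_cache`: `find_slot()` may mutate cells before a batch is known to succeed, `restore()` rolls back anything uncommitted (backed by the new `recovery` map), and `commit()` finalizes it, with `llama_kv_cache_guard` packaging the rollback as RAII. The sketch below illustrates the intended call pattern with a mock cache; it is not code from this package, and `decode_batch` is a hypothetical caller.

```cpp
#include <cstdio>

// Mock of the commit/restore contract declared in the header above
// (illustrative only): find_slot() records pending changes, commit()
// clears them, restore() rolls back whatever is still pending.
struct mock_kv_cache {
    bool pending = false;

    bool find_slot() { pending = true; return true; } // pretend a slot was found
    void commit()    { pending = false; }             // batch succeeded: keep the cells
    void restore() {                                  // batch failed: roll back
        if (pending) { std::puts("rolled back uncommitted cells"); pending = false; }
    }
};

// Mirrors llama_kv_cache_guard from the diff: the destructor always calls
// restore(), which becomes a no-op once commit() has cleared the pending state.
struct kv_guard {
    explicit kv_guard(mock_kv_cache * kv) : kv(kv) {}
    ~kv_guard() { kv->restore(); }
    void commit() { kv->commit(); }
private:
    mock_kv_cache * kv;
};

// Hypothetical caller showing why the guard exists: every early return after
// find_slot() is rolled back automatically.
static bool decode_batch(mock_kv_cache & kv, bool fail) {
    kv_guard guard(&kv);

    if (!kv.find_slot()) return false; // nothing pending yet, restore() is a no-op
    if (fail)            return false; // early exit: guard destructor restores

    guard.commit();                    // success: finalize before the guard dies
    return true;
}

int main() {
    mock_kv_cache kv;
    decode_batch(kv, /*fail =*/ true);  // prints "rolled back uncommitted cells"
    decode_batch(kv, /*fail =*/ false); // commits; the destructor's restore() does nothing
}
```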
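`llama_kv_cache_unified_iswa` is documented in the header as two `llama_kv_cache_unified` instances, with the SWA-layer cache dropping tokens that fall outside the `n_swa` window after each successful commit; the `pending.pos` map of per-sequence `pmin`/`pmax` entries exists to drive that pruning. Below is a hedged sketch of the commit-time arithmetic, assuming a hypothetical `prune_swa_on_commit` helper; the package's real bookkeeping lives in the compiled sources, which this diff does not show.

```cpp
#include <cstdint>
#include <cstdio>
#include <unordered_map>

using llama_seq_id = int32_t;
using llama_pos    = int32_t;

// Pending per-sequence position ranges, mirroring the `pending.pos` map in the
// iswa class above: pmin/pmax are the positions touched by the last batch.
struct entry { llama_pos pmin; llama_pos pmax; };

// Illustrative commit-time pruning: for each sequence touched by the batch,
// everything older than (newest position - n_swa) can be dropped from the SWA
// cache, since sliding-window layers never attend that far back.
void prune_swa_on_commit(std::unordered_map<llama_seq_id, entry> & pending, uint32_t n_swa) {
    for (const auto & [seq_id, e] : pending) {
        const llama_pos keep_from = e.pmax - (llama_pos) n_swa + 1;
        if (keep_from > 0) {
            // a real implementation would delegate to the caches' own removal
            // APIs here; this sketch only reports what would be removed
            std::printf("seq %d: prune positions [0, %d)\n", seq_id, keep_from);
        }
    }
    pending.clear();
}

int main() {
    std::unordered_map<llama_seq_id, entry> pending = {{0, {120, 160}}};
    prune_swa_on_commit(pending, /*n_swa =*/ 128); // seq 0: prune positions [0, 33)
}
```

The split mirrors the design note in the header: only the non-SWA cache has to retain the full context, so the KV memory of sliding-window layers stays bounded by the window size rather than by `kv_size`.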