cactus-react-native 0.0.1 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (189)
  1. package/LICENSE.txt +20 -0
  2. package/README.md +3 -1
  3. package/android/src/main/CMakeLists.txt +60 -21
  4. package/android/src/main/java/com/cactus/Cactus.java +465 -0
  5. package/android/src/main/java/com/cactus/LlamaContext.java +199 -0
  6. package/android/src/main/jni.cpp +325 -10
  7. package/android/src/main/jniLibs/arm64-v8a/libcactus.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/libcactus_v8.so +0 -0
  9. package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2.so +0 -0
  10. package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_dotprod.so +0 -0
  11. package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_dotprod_i8mm.so +0 -0
  12. package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_i8mm.so +0 -0
  13. package/android/src/main/jniLibs/x86_64/libcactus.so +0 -0
  14. package/android/src/main/jniLibs/x86_64/libcactus_x86_64.so +0 -0
  15. package/android/src/newarch/java/com/cactus/CactusModule.java +79 -7
  16. package/android/src/oldarch/java/com/cactus/CactusModule.java +70 -0
  17. package/cactus-react-native.podspec +0 -3
  18. package/ios/CMakeLists.txt +56 -36
  19. package/ios/Cactus.mm +243 -2
  20. package/ios/CactusContext.h +22 -0
  21. package/ios/CactusContext.mm +176 -1
  22. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/cactus.h +92 -5
  23. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/cactus_ffi.h +229 -0
  24. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/chat.h +2 -0
  25. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/common.h +42 -51
  26. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-backend.h +4 -4
  27. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-common.h +12 -6
  28. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpp.h +1 -1
  29. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu.h +5 -0
  30. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-impl.h +52 -18
  31. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-metal-impl.h +106 -14
  32. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-opt.h +49 -28
  33. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml.h +87 -106
  34. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-arch.h +16 -0
  35. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-batch.h +2 -1
  36. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-chat.h +7 -2
  37. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-context.h +44 -33
  38. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-cparams.h +1 -0
  39. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-graph.h +83 -17
  40. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-hparams.h +44 -2
  41. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-kv-cache.h +407 -179
  42. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-memory.h +13 -2
  43. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-model-loader.h +5 -3
  44. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-model-saver.h +37 -0
  45. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-model.h +24 -2
  46. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama-vocab.h +6 -0
  47. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/llama.h +102 -142
  48. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/minja/chat-template.hpp +23 -11
  49. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/minja/minja.hpp +186 -127
  50. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Info.plist +0 -0
  51. package/ios/cactus.xcframework/ios-arm64/cactus.framework/cactus +0 -0
  52. package/ios/cactus.xcframework/ios-arm64/cactus.framework/ggml-llama.metallib +0 -0
  53. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/cactus.h +92 -5
  54. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/cactus_ffi.h +229 -0
  55. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/chat.h +2 -0
  56. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/common.h +42 -51
  57. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-backend.h +4 -4
  58. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-common.h +12 -6
  59. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpp.h +1 -1
  60. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu.h +5 -0
  61. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-impl.h +52 -18
  62. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-metal-impl.h +106 -14
  63. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-opt.h +49 -28
  64. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml.h +87 -106
  65. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-arch.h +16 -0
  66. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-batch.h +2 -1
  67. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-chat.h +7 -2
  68. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-context.h +44 -33
  69. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-cparams.h +1 -0
  70. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-graph.h +83 -17
  71. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-hparams.h +44 -2
  72. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-kv-cache.h +407 -179
  73. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-memory.h +13 -2
  74. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-model-loader.h +5 -3
  75. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-model-saver.h +37 -0
  76. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-model.h +24 -2
  77. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama-vocab.h +6 -0
  78. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/llama.h +102 -142
  79. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/minja/chat-template.hpp +23 -11
  80. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/minja/minja.hpp +186 -127
  81. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Info.plist +0 -0
  82. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/_CodeSignature/CodeResources +1 -1
  83. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/cactus +0 -0
  84. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/ggml-llama-sim.metallib +0 -0
  85. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/cactus.h +92 -5
  86. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/cactus_ffi.h +229 -0
  87. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/chat.h +2 -0
  88. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/common.h +42 -51
  89. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-backend.h +4 -4
  90. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-common.h +12 -6
  91. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpp.h +1 -1
  92. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu.h +5 -0
  93. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-impl.h +52 -18
  94. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-metal-impl.h +106 -14
  95. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-opt.h +49 -28
  96. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml.h +87 -106
  97. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-arch.h +16 -0
  98. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-batch.h +2 -1
  99. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-chat.h +7 -2
  100. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-context.h +44 -33
  101. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-cparams.h +1 -0
  102. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-graph.h +83 -17
  103. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-hparams.h +44 -2
  104. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-kv-cache.h +407 -179
  105. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-memory.h +13 -2
  106. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-model-loader.h +5 -3
  107. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-model-saver.h +37 -0
  108. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-model.h +24 -2
  109. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama-vocab.h +6 -0
  110. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/llama.h +102 -142
  111. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/minja/chat-template.hpp +23 -11
  112. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/minja/minja.hpp +186 -127
  113. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Info.plist +0 -0
  114. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/cactus +0 -0
  115. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/ggml-llama.metallib +0 -0
  116. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/cactus.h +92 -5
  117. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/cactus_ffi.h +229 -0
  118. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/chat.h +2 -0
  119. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/common.h +42 -51
  120. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-backend.h +4 -4
  121. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-common.h +12 -6
  122. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpp.h +1 -1
  123. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu.h +5 -0
  124. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-impl.h +52 -18
  125. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-metal-impl.h +106 -14
  126. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-opt.h +49 -28
  127. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml.h +87 -106
  128. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-arch.h +16 -0
  129. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-batch.h +2 -1
  130. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-chat.h +7 -2
  131. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-context.h +44 -33
  132. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-cparams.h +1 -0
  133. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-graph.h +83 -17
  134. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-hparams.h +44 -2
  135. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-kv-cache.h +407 -179
  136. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-memory.h +13 -2
  137. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-model-loader.h +5 -3
  138. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-model-saver.h +37 -0
  139. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-model.h +24 -2
  140. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama-vocab.h +6 -0
  141. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/llama.h +102 -142
  142. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/minja/chat-template.hpp +23 -11
  143. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/minja/minja.hpp +186 -127
  144. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Info.plist +0 -0
  145. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/_CodeSignature/CodeResources +1 -1
  146. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/cactus +0 -0
  147. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/ggml-llama-sim.metallib +0 -0
  148. package/lib/commonjs/NativeCactus.js +1 -0
  149. package/lib/commonjs/NativeCactus.js.map +1 -1
  150. package/lib/commonjs/index.js +112 -0
  151. package/lib/commonjs/index.js.map +1 -1
  152. package/lib/commonjs/tools.js +118 -0
  153. package/lib/commonjs/tools.js.map +1 -0
  154. package/lib/module/NativeCactus.js +3 -0
  155. package/lib/module/NativeCactus.js.map +1 -1
  156. package/lib/module/index.js +87 -1
  157. package/lib/module/index.js.map +1 -1
  158. package/lib/module/tools.js +110 -0
  159. package/lib/module/tools.js.map +1 -0
  160. package/lib/typescript/NativeCactus.d.ts +30 -1
  161. package/lib/typescript/NativeCactus.d.ts.map +1 -1
  162. package/lib/typescript/index.d.ts +21 -2
  163. package/lib/typescript/index.d.ts.map +1 -1
  164. package/lib/typescript/tools.d.ts +38 -0
  165. package/lib/typescript/tools.d.ts.map +1 -0
  166. package/package.json +6 -3
  167. package/src/NativeCactus.ts +62 -1
  168. package/src/index.ts +113 -2
  169. package/src/tools.ts +127 -0
  170. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-aarch64.h +0 -8
  171. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-impl.h +0 -531
  172. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-quants.h +0 -63
  173. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/ggml-cpu-traits.h +0 -38
  174. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/sgemm.h +0 -14
  175. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-aarch64.h +0 -8
  176. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-impl.h +0 -531
  177. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-quants.h +0 -63
  178. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-traits.h +0 -38
  179. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/sgemm.h +0 -14
  180. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-aarch64.h +0 -8
  181. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-impl.h +0 -531
  182. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-quants.h +0 -63
  183. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/ggml-cpu-traits.h +0 -38
  184. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/sgemm.h +0 -14
  185. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-aarch64.h +0 -8
  186. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-impl.h +0 -531
  187. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-quants.h +0 -63
  188. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/ggml-cpu-traits.h +0 -38
  189. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/sgemm.h +0 -14
Headers/llama-kv-cache.h (+407 -179):
@@ -2,174 +2,289 @@
 
 #include "llama.h"
 #include "llama-io.h"
+#include "llama-graph.h"
 #include "llama-memory.h"
 
 #include "ggml-cpp.h"
 
-#include <functional>
 #include <set>
+#include <unordered_map>
 #include <vector>
 
 struct llama_cparams;
 struct llama_hparams;
 struct llama_ubatch;
+struct llama_sbatch;
+struct llama_model;
+struct llama_context;
 
 struct llama_kv_cache : public llama_memory_i {
-    using llama_memory_i::llama_memory_i;
+    virtual ~llama_kv_cache() = default;
 
-    virtual int32_t get_n_tokens() const = 0;
-    virtual uint32_t get_used_cells() const = 0; // TODO: remove, this is too-specific to the unified cache
+    // call if batch processing fails - restores the cache state
+    virtual void restore() = 0;
 
+    // call after successful batch processing - clears any pending state
+    virtual void commit() = 0;
+
+    // process any pending defrag/shift/etc. operations
+    // optionally call once before processing a new batch
+    virtual bool update(llama_context & lctx) = 0;
+
+    // schedule a defrag if the fragmentation threshold is exceeded. otherwise, do nothing
+    virtual void defrag_sched(float thold) = 0;
+
+    // simulate full cache, used for allocating worst-case compute buffers
+    virtual void set_full() = 0;
+
+    //
+    // batch processing
+    //
+
+    // =============================================================================================================
+    // TODO: refactor and simplify this
+
+    virtual llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) = 0;
+
+    // different KV caches require different batch splitting strategies
+    virtual llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const = 0;
+
+    // find an empty slot of size "n_tokens" in the cache
+    virtual bool find_slot(const llama_ubatch & batch) = 0;
+
+    // =============================================================================================================
+
+    // getters
     virtual bool get_can_shift() const = 0;
 
     bool get_can_edit() const override { return get_can_shift(); }
+
+    //
+    // state write/read
+    //
+
+    virtual void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const = 0;
+    virtual void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) = 0;
 };
 
-struct llama_kv_cell {
-    llama_pos pos = -1;
-    llama_pos delta = 0;
-    int32_t src = -1; // used by recurrent state models to copy states
-    int32_t tail = -1;
+//
+// llama_kv_cache_guard
+//
 
-    std::set<llama_seq_id> seq_id;
+struct llama_kv_cache_guard {
+    llama_kv_cache_guard(llama_kv_cache * kv) : kv(kv) {}
 
-    bool has_seq_id(const llama_seq_id & id) const {
-        return seq_id.find(id) != seq_id.end();
+    ~llama_kv_cache_guard() {
+        kv->restore();
     }
 
-    bool is_empty() const {
-        return seq_id.empty();
+    void commit() {
+        kv->commit();
     }
 
-    bool is_same_seq(const llama_kv_cell & other) const {
-        return seq_id == other.seq_id;
-    }
+private:
+    llama_kv_cache * kv;
 };
 
-// a structure holds information about the slot found in llama_kv_cache_find_slot
-struct llama_kv_cache_slot_info {
-    std::pair<uint32_t, uint32_t> boundaries; // slot boundaries [begin, end)
-    bool found = false; // the slot was found
-
-    explicit llama_kv_cache_slot_info(bool found_) : found{found_} {}
-    llama_kv_cache_slot_info(uint32_t begin, uint32_t end) : boundaries{begin, end}, found{true} {}
-
-    operator bool() const { return found; }
-};
+//
+// llama_kv_cache_unified
+//
 
-// ring-buffer of cached KV data
-// TODO: pimpl
-// TODO: add notion of max sequences
 class llama_kv_cache_unified : public llama_kv_cache {
 public:
-    // can be used to query data from the model if needed
-    struct callbacks {
-        std::function<lm_ggml_tensor * (uint32_t n_ctx_per_seq, int il)> get_rope_factors;
-    };
-
-    llama_kv_cache_unified(
-            const llama_hparams & hparams,
-            callbacks cbs);
+    static uint32_t get_padding(const llama_cparams & cparams);
 
-    virtual ~llama_kv_cache_unified() = default;
+    // this callback is used to filter out layers that should not be included in the cache
+    using layer_filter_cb = std::function<bool(int32_t il)>;
 
-    // TODO: become constructor
-    bool init(
-            const llama_model & model, // TODO: do not reference the model
-            const llama_cparams & cparams,
-            lm_ggml_type type_k,
-            lm_ggml_type type_v,
-            uint32_t kv_size,
-            bool offload);
-
-    int32_t get_n_tokens() const override;
-    uint32_t get_used_cells() const override;
-
-    size_t total_size() const;
-
-    // TODO: better data structures to reduce the cost of this operation
-    llama_pos pos_max() const;
+    llama_kv_cache_unified(
+            const llama_model & model,
+            layer_filter_cb && filter,
+            lm_ggml_type type_k,
+            lm_ggml_type type_v,
+            bool v_trans,
+            bool offload,
+            uint32_t kv_size,
+            uint32_t n_seq_max,
+            uint32_t n_pad,
+            uint32_t n_swa,
+            llama_swa_type swa_type);
+
+    ~llama_kv_cache_unified() = default;
+
+    //
+    // llama_memory_i
+    //
 
     void clear() override;
-    void defrag() override;
 
     bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
     void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
-    void seq_keep(llama_seq_id seq_id) override;
+    void seq_keep(llama_seq_id seq_id) override;
     void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
     void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
 
-    llama_pos seq_pos_max(llama_seq_id seq_id) override;
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
 
-    bool get_can_shift() const override;
+    //
+    // llama_kv_cache
+    //
+
+    void restore() override;
+    void commit() override;
+
+    bool update(llama_context & ctx) override;
+
+    void defrag_sched(float thold) override;
+
+    void set_full() override;
+
+    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
 
-    // find an empty slot of size "n_tokens" in the cache
     // updates the cache head
-    // returns a structure holding information about the slot found
     // Note: On success, it's important that cache.head points
     // to the first cell of the slot.
-    llama_kv_cache_slot_info find_slot(const llama_ubatch & batch);
+    bool find_slot(const llama_ubatch & batch) override;
 
-    // TODO: maybe not needed
-    uint32_t get_padding(const llama_cparams & cparams) const;
+    bool get_can_shift() const override;
 
-    // find how many cells are currently in use
-    uint32_t cell_max() const;
+    // state write/load
 
-    size_t size_k_bytes() const;
-    size_t size_v_bytes() const;
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
 
-    // defrag
+    //
+    // llama_kv_cache_unified specific API
+    //
 
-    struct {
-        std::vector<uint32_t> ids;
-    } defrag_info;
+    uint32_t get_n() const;
+    uint32_t get_size() const;
 
-    // return true if cells have been moved
-    bool defrag_prepare(int32_t n_max_nodes);
+    // get views of the current state of the cache
+    lm_ggml_tensor * get_k(lm_ggml_context * ctx, int32_t il) const;
+    lm_ggml_tensor * get_v(lm_ggml_context * ctx, int32_t il) const;
 
-    // state save/load
+    // store k_cur and v_cur in the cache based on the current head location
+    lm_ggml_tensor * cpy_k(lm_ggml_context * ctx, lm_ggml_tensor * k_cur, int32_t il) const;
+    lm_ggml_tensor * cpy_v(lm_ggml_context * ctx, lm_ggml_tensor * v_cur, int32_t il) const;
 
-    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const;
-    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1);
+    void prune_swa(llama_seq_id seq_id, llama_pos pmin, llama_pos pmax);
 
-    // members
+    void set_input_kq_mask (lm_ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
+    void set_input_k_shift (lm_ggml_tensor * dst) const;
+    void set_input_pos_bucket(lm_ggml_tensor * dst, const llama_ubatch * ubatch) const;
 
+private:
+    const llama_model & model;
     const llama_hparams & hparams;
 
-    callbacks cbs;
+    struct kv_cell {
+        llama_pos pos = -1;
+        llama_pos delta = 0;
 
-    bool has_shift = false;
-    bool do_defrag = false;
+        // TODO: replace with bitset uint64_t
+        std::set<llama_seq_id> seq_id;
+
+        bool has_seq_id(const llama_seq_id & id) const {
+            return seq_id.find(id) != seq_id.end();
+        }
+
+        bool is_empty() const {
+            return seq_id.empty();
+        }
+
+        bool is_same_seq(const kv_cell & other) const {
+            return seq_id == other.seq_id;
+        }
+    };
 
-    // TODO: remove this and implement llama_kv_cache_recurrent instead
-    bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token
+    struct kv_layer {
+        // layer index in the model
+        // note: can be different from the layer index in the KV cache
+        uint32_t il;
 
+        lm_ggml_tensor * k;
+        lm_ggml_tensor * v;
+    };
+
+    bool has_shift = false;
+    bool do_defrag = false;
     bool v_trans = true; // the value tensor is transposed
-    bool can_shift = false;
 
-    // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_impl also uses it, so it
-    // cannot be freely changed after a slot has been allocated.
-    uint32_t head = 0;
-    uint32_t size = 0;
-    uint32_t used = 0; // used cells (i.e. at least one seq_id)
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+    uint32_t size = 0; // total number of cells, shared across all sequences
+    uint32_t used = 0; // used cells (i.e. at least one seq_id) (TODO: add `struct kv_cells` and keep track automaticallt)
 
     // computed before each graph build
     uint32_t n = 0;
 
-    std::vector<llama_kv_cell> cells;
+    const uint32_t n_seq_max = 1;
 
-    std::vector<lm_ggml_tensor *> k_l; // per layer
-    std::vector<lm_ggml_tensor *> v_l;
+    // required padding
+    const uint32_t n_pad = 1;
 
-private:
-    lm_ggml_type type_k = LM_GGML_TYPE_F16;
-    lm_ggml_type type_v = LM_GGML_TYPE_F16;
+    // SWA
+    const uint32_t n_swa = 0;
+
+    const llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;
 
     std::vector<lm_ggml_context_ptr> ctxs;
     std::vector<lm_ggml_backend_buffer_ptr> bufs;
 
+    std::vector<kv_cell> cells; // TODO: replace with `struct kv_cells`
+    std::vector<kv_layer> layers;
+
+    // model layer id -> KV cache layer id
+    std::unordered_map<int32_t, int32_t> map_layer_ids;
+
+    // recovery information used to restore the KV cells to their original state in case of a failure
+    struct {
+        void clear() {
+            cells.clear();
+        }
+
+        std::unordered_map<uint32_t, kv_cell> cells;
+    } recovery;
+
+    // defrag
+    struct {
+        std::vector<uint32_t> ids;
+    } defrag_info;
+
+    // return true if cells have been moved
+    bool defrag_prepare(int32_t n_max_nodes);
+
+    // find how many cells are currently in use
+    uint32_t cell_max() const;
+
+    size_t total_size() const;
+
+    size_t size_k_bytes() const;
+    size_t size_v_bytes() const;
+
+    bool is_masked_swa(llama_pos p0, llama_pos p1) const;
+
+    lm_ggml_tensor * build_rope_shift(
+            const llama_cparams & cparams,
+            lm_ggml_context * ctx,
+            lm_ggml_tensor * cur,
+            lm_ggml_tensor * shift,
+            lm_ggml_tensor * factors,
+            float freq_base,
+            float freq_scale) const;
+
+    llm_graph_result_ptr build_graph_shift(
+            const llama_cparams & cparams,
+            lm_ggml_context * ctx,
+            lm_ggml_cgraph * gf) const;
+
+    llm_graph_result_ptr build_graph_defrag(
+            const llama_cparams & cparams,
+            lm_ggml_context * ctx,
+            lm_ggml_cgraph * gf) const;
+
     void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
     void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
 
@@ -177,111 +292,224 @@ private:
     bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
 };
 
-// TODO: temporary reusing llama_kv_cache_unified -- implement recurrent cache and simplify llama_kv_cache_unified
-//class llama_kv_cache_recurrent : public llama_kv_cache_unified {
-//public:
-//    using llama_kv_cache_unified::llama_kv_cache_unified;
-//};
-
 //
-// kv cache restore
+// llama_kv_cache_unified_iswa
 //
 
-// saves the kv_cache state for future recovery.
-// used to rollback llama_kv_cache_find_slot changes.
-struct llama_kv_slot_restorer {
-    struct llama_kv_cache_state {
-        uint32_t head = 0;
-        uint32_t n = 0;
-    } old_state;
+// utilizes two instances of llama_kv_cache_unified
+// the first instance is for the non-SWA layers of the model and the second instance is for the SWA layers
+// upon successful commit, the SWA cache removes old tokens outside the n_swa window
 
-    // for non-recurrent models only
-    // list of slots to restore
-    std::vector<std::pair<uint32_t, uint32_t>> slot_boundaries;
+class llama_kv_cache_unified_iswa : public llama_kv_cache {
+public:
+    llama_kv_cache_unified_iswa(
+            const llama_model & model,
+            lm_ggml_type type_k,
+            lm_ggml_type type_v,
+            bool v_trans,
+            bool offload,
+            bool swa_full,
+            uint32_t kv_size,
+            uint32_t n_seq_max,
+            uint32_t n_batch,
+            uint32_t n_pad);
 
-    bool do_restore = false;
+    ~llama_kv_cache_unified_iswa() = default;
 
-    llama_kv_cache_unified & cache;
+    //
+    // llama_memory_i
+    //
 
-    explicit llama_kv_slot_restorer(llama_kv_cache_unified & cache) : cache(cache) {
-        old_state.head = cache.head;
-        old_state.n = cache.n;
-    }
+    void clear() override;
 
-    // saves a slot information for future restoration
-    void save(const llama_kv_cache_slot_info & slot) {
-        if (slot) {
-            do_restore = true;
-            if (slot.boundaries.first != slot.boundaries.second) {
-                slot_boundaries.push_back(slot.boundaries);
-            }
-        }
-    }
+    bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
 
-    // must be explicitly called to restore the kv_cache state
-    // and rollback changes from all llama_kv_cache_find_slot calls
-    void restore() {
-        if (do_restore) {
-            cache.head = old_state.head;
-            cache.n = old_state.n;
-
-            if (cache.recurrent) { // recurrent models like Mamba or RWKV can't have a state partially erased
-                cache.seq_rm(-1, -1, -1);
-            } else {
-                for (auto & slot : slot_boundaries) {
-                    cache.seq_rm(-1, slot.first, slot.second);
-                }
-            }
-        }
-    }
-};
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    //
+    // llama_kv_cache
+    //
+
+    void restore() override;
+    void commit() override;
+
+    bool update(llama_context & ctx) override;
+
+    void defrag_sched(float thold) override;
 
-// TODO: maybe become part of the public llama_kv_cache in the future
-int32_t llama_kv_cache_n_tokens(const llama_kv_cache * kv);
+    void set_full() override;
 
-int32_t llama_kv_cache_used_cells(const llama_kv_cache * kv);
+    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
 
-void llama_kv_cache_clear(llama_kv_cache * kv);
+    bool find_slot(const llama_ubatch & batch) override;
 
-bool llama_kv_cache_seq_rm(
-        llama_kv_cache * kv,
-        llama_seq_id seq_id,
-        llama_pos p0,
-        llama_pos p1);
+    bool get_can_shift() const override;
+
+    // state write/load
 
-void llama_kv_cache_seq_cp(
-        llama_kv_cache * kv,
-        llama_seq_id seq_id_src,
-        llama_seq_id seq_id_dst,
-        llama_pos p0,
-        llama_pos p1);
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
 
-void llama_kv_cache_seq_keep(llama_kv_cache * kv, llama_seq_id seq_id);
+    //
+    // llama_kv_cache_unified_iswa specific API
+    //
 
-void llama_kv_cache_seq_add(
-        llama_kv_cache * kv,
-        llama_seq_id seq_id,
-        llama_pos p0,
-        llama_pos p1,
-        llama_pos delta);
+    llama_kv_cache_unified * get_kv_base() const;
+    llama_kv_cache_unified * get_kv_swa () const;
 
-void llama_kv_cache_seq_div(
-        llama_kv_cache * kv,
-        llama_seq_id seq_id,
-        llama_pos p0,
-        llama_pos p1,
-        int d);
+private:
+    const llama_hparams & hparams;
 
-llama_pos llama_kv_cache_seq_pos_max(llama_kv_cache * kv, llama_seq_id seq_id);
+    bool do_prune = true;
 
-void llama_kv_cache_defrag(llama_kv_cache * kv);
+    struct {
+        struct entry {
+            llama_pos pmin;
+            llama_pos pmax;
+        };
 
-bool llama_kv_cache_can_shift(const llama_kv_cache * kv);
+        void clear() {
+            pos.clear();
+        }
+
+        // used to perform SWA pruning of old tokens
+        std::unordered_map<llama_seq_id, entry> pos;
+    } pending;
+
+    std::unique_ptr<llama_kv_cache_unified> kv_base;
+    std::unique_ptr<llama_kv_cache_unified> kv_swa;
+};
 
 //
-// kv cache view
+// llama_kv_cache_recurrent
 //
 
-llama_kv_cache_view llama_kv_cache_view_init(const llama_kv_cache & kv, int32_t n_seq_max);
+class llama_kv_cache_recurrent : public llama_kv_cache {
+public:
+    struct kv_cell {
+        llama_pos pos = -1;
+        int32_t src = -1; // used to copy states
+        int32_t tail = -1;
+
+        std::set<llama_seq_id> seq_id;
+
+        bool has_seq_id(const llama_seq_id & id) const {
+            return seq_id.find(id) != seq_id.end();
+        }
+
+        bool is_empty() const {
+            return seq_id.empty();
+        }
+
+        bool is_same_seq(const kv_cell & other) const {
+            return seq_id == other.seq_id;
+        }
+    };
+
+    llama_kv_cache_recurrent(
+            const llama_model & model,
+            lm_ggml_type type_k,
+            lm_ggml_type type_v,
+            bool offload,
+            uint32_t kv_size,
+            uint32_t n_seq_max);
+
+    ~llama_kv_cache_recurrent() = default;
+
+    //
+    // llama_memory_i
+    //
 
-void llama_kv_cache_view_update(llama_kv_cache_view * view, const llama_kv_cache * kv);
+    void clear() override;
+
+    bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    //
+    // llama_kv_cache
+    //
+
+    void restore() override;
+    void commit() override;
+
+    bool update(llama_context & ctx) override;
+
+    void defrag_sched(float thold) override;
+
+    void set_full() override;
+
+    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
+
+    bool find_slot(const llama_ubatch & batch) override;
+
+    bool get_can_shift() const override;
+
+    // TODO: temporary methods - they are not really const as they do const_cast<>, fix this
+    int32_t s_copy(int i) const;
+    float s_mask(int i) const;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+    uint32_t size = 0; // total number of cells, shared across all sequences
+    uint32_t used = 0; // used cells (i.e. at least one seq_id)
+
+    // computed before each graph build
+    uint32_t n = 0;
+
+    std::vector<kv_cell> cells;
+
+    std::vector<lm_ggml_tensor *> k_l; // per layer
+    std::vector<lm_ggml_tensor *> v_l;
+
+private:
+    //const llama_model & model;
+    const llama_hparams & hparams;
+
+    // commit/restore cache
+    // TODO: rework for recurrent cache
+    struct slot_range {
+        uint32_t c0 = 0; // note: these are cell indices, not sequence positions
+        uint32_t c1 = 0;
+    };
+
+    // pending cell updates that are not yet committed
+    struct {
+        std::vector<slot_range> ranges;
+    } pending;
+
+    const uint32_t n_seq_max = 1;
+
+    std::vector<lm_ggml_context_ptr> ctxs;
+    std::vector<lm_ggml_backend_buffer_ptr> bufs;
+
+    // find how many cells are currently in use
+    uint32_t cell_max() const;
+
+    size_t total_size() const;
+
+    size_t size_k_bytes() const;
+    size_t size_v_bytes() const;
+
+    void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
+    void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
+
+    bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
+    bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
+};
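
The headline change in this header is the new transactional contract on llama_kv_cache: find_slot() now returns a bool, and a caller is expected to commit() after a successful batch or restore() after a failure, with llama_kv_cache_guard providing the RAII wrapper. The following is a minimal usage sketch, not code from this package; decode_one_batch() and its arguments are hypothetical, and only the types and methods declared in the header above are assumed.

// Sketch only: how the commit/restore API introduced above is intended to be used.
// decode_one_batch() is a hypothetical caller, not a function shipped in this package.
#include "llama-kv-cache.h"

static bool decode_one_batch(llama_kv_cache & kv, const llama_ubatch & ubatch) {
    // The guard's destructor calls kv.restore(); commit() clears the pending
    // recovery state first, so the restore on the success path is a no-op.
    llama_kv_cache_guard guard(&kv);

    if (!kv.find_slot(ubatch)) {
        return false; // cells written so far are rolled back when the guard leaves scope
    }

    // ... build and run the compute graph for this ubatch ...

    guard.commit(); // keep the newly occupied KV cells
    return true;
}

This pattern matches the private recovery/pending bookkeeping that the unified, iswa, and recurrent cache classes each declare above.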