@novastera-oss/llamarn 0.2.6 → 0.2.7

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries; it is provided for informational purposes only.
Files changed (192)
  1. package/android/src/main/cpp/include/llama.h +134 -36
  2. package/android/src/main/jniLibs/arm64-v8a/libggml-base.so +0 -0
  3. package/android/src/main/jniLibs/arm64-v8a/libggml-cpu.so +0 -0
  4. package/android/src/main/jniLibs/arm64-v8a/libggml.so +0 -0
  5. package/android/src/main/jniLibs/arm64-v8a/libllama.so +0 -0
  6. package/android/src/main/jniLibs/x86_64/libggml-base.so +0 -0
  7. package/android/src/main/jniLibs/x86_64/libggml-cpu.so +0 -0
  8. package/android/src/main/jniLibs/x86_64/libggml.so +0 -0
  9. package/android/src/main/jniLibs/x86_64/libllama.so +0 -0
  10. package/cpp/LlamaCppModel.cpp +2 -2
  11. package/cpp/LlamaCppModel.h +3 -3
  12. package/cpp/PureCppImpl.cpp +1 -1
  13. package/cpp/PureCppImpl.h +2 -2
  14. package/cpp/build-info.cpp +2 -2
  15. package/cpp/llama.cpp/CMakeLists.txt +15 -4
  16. package/cpp/llama.cpp/Makefile +2 -2
  17. package/cpp/llama.cpp/README.md +32 -13
  18. package/cpp/llama.cpp/common/CMakeLists.txt +10 -20
  19. package/cpp/llama.cpp/common/arg.cpp +30 -6
  20. package/cpp/llama.cpp/common/build-info.cpp.in +2 -2
  21. package/cpp/llama.cpp/common/chat-parser.cpp +5 -0
  22. package/cpp/llama.cpp/common/chat-parser.h +2 -0
  23. package/cpp/llama.cpp/common/chat.cpp +12 -9
  24. package/cpp/llama.cpp/common/chat.h +1 -1
  25. package/cpp/llama.cpp/common/common.cpp +50 -40
  26. package/cpp/llama.cpp/common/common.h +5 -2
  27. package/cpp/llama.cpp/common/speculative.cpp +6 -4
  28. package/cpp/llama.cpp/convert_hf_to_gguf.py +97 -56
  29. package/cpp/llama.cpp/ggml/CMakeLists.txt +47 -2
  30. package/cpp/llama.cpp/ggml/cmake/common.cmake +1 -2
  31. package/cpp/llama.cpp/ggml/src/CMakeLists.txt +47 -13
  32. package/cpp/llama.cpp/ggml/src/ggml-backend-reg.cpp +5 -0
  33. package/cpp/llama.cpp/ggml/src/ggml-cann/common.h +6 -1
  34. package/cpp/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +33 -9
  35. package/cpp/llama.cpp/ggml/src/ggml-common.h +4 -0
  36. package/cpp/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +93 -24
  37. package/cpp/llama.cpp/ggml/src/ggml-cpu/amx/amx.cpp +1 -1
  38. package/cpp/llama.cpp/ggml/src/ggml-cpu/amx/mmq.cpp +1 -1
  39. package/cpp/llama.cpp/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
  40. package/cpp/llama.cpp/ggml/src/ggml-cpu/arch/arm/quants.c +4113 -0
  41. package/cpp/llama.cpp/ggml/src/ggml-cpu/arch/arm/repack.cpp +2174 -0
  42. package/cpp/llama.cpp/ggml/src/ggml-cpu/arch/loongarch/quants.c +2638 -0
  43. package/cpp/llama.cpp/ggml/src/ggml-cpu/arch/powerpc/quants.c +2731 -0
  44. package/cpp/llama.cpp/ggml/src/ggml-cpu/arch/riscv/quants.c +2068 -0
  45. package/cpp/llama.cpp/ggml/src/ggml-cpu/arch/riscv/repack.cpp +396 -0
  46. package/cpp/llama.cpp/ggml/src/ggml-cpu/arch/s390/quants.c +1299 -0
  47. package/cpp/llama.cpp/ggml/src/ggml-cpu/arch/wasm/quants.c +1480 -0
  48. package/cpp/llama.cpp/ggml/src/ggml-cpu/arch/x86/quants.c +4310 -0
  49. package/cpp/llama.cpp/ggml/src/ggml-cpu/{ggml-cpu-aarch64.cpp → arch/x86/repack.cpp} +59 -3206
  50. package/cpp/llama.cpp/ggml/src/ggml-cpu/arch-fallback.h +184 -0
  51. package/cpp/llama.cpp/ggml/src/ggml-cpu/common.h +1 -1
  52. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-impl.h +7 -4
  53. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +10 -2
  54. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +8 -8
  55. package/cpp/llama.cpp/ggml/src/ggml-cpu/{ggml-cpu-hbm.cpp → hbm.cpp} +1 -1
  56. package/cpp/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +1 -1
  57. package/cpp/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.cpp +56 -7
  58. package/cpp/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.h +5 -0
  59. package/cpp/llama.cpp/ggml/src/ggml-cpu/ops.cpp +2 -2
  60. package/cpp/llama.cpp/ggml/src/ggml-cpu/quants.c +1157 -0
  61. package/cpp/llama.cpp/ggml/src/ggml-cpu/{ggml-cpu-quants.h → quants.h} +26 -0
  62. package/cpp/llama.cpp/ggml/src/ggml-cpu/repack.cpp +1555 -0
  63. package/cpp/llama.cpp/ggml/src/ggml-cpu/repack.h +98 -0
  64. package/cpp/llama.cpp/ggml/src/ggml-cpu/simd-mappings.h +2 -4
  65. package/cpp/llama.cpp/ggml/src/ggml-cpu/{ggml-cpu-traits.cpp → traits.cpp} +1 -1
  66. package/cpp/llama.cpp/ggml/src/ggml-cuda/common.cuh +5 -8
  67. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-mma-f16.cuh +4 -1
  68. package/cpp/llama.cpp/ggml/src/ggml-cuda/ggml-cuda.cu +6 -8
  69. package/cpp/llama.cpp/ggml/src/ggml-cuda/ssm-scan.cu +6 -4
  70. package/cpp/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +4 -0
  71. package/cpp/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +11 -10
  72. package/cpp/llama.cpp/ggml/src/ggml-metal/ggml-metal.m +33 -8
  73. package/cpp/llama.cpp/ggml/src/ggml-metal/ggml-metal.metal +135 -100
  74. package/cpp/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +7 -0
  75. package/cpp/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +908 -3
  76. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/concat.cl +109 -0
  77. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/mul_mv_id_q4_0_f32_8x_flat.cl +283 -0
  78. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/pad.cl +30 -0
  79. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/repeat.cl +39 -0
  80. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/tanh.cl +63 -0
  81. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/tsembd.cl +48 -0
  82. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/upscale.cl +121 -0
  83. package/cpp/llama.cpp/ggml/src/ggml-quants.c +0 -2
  84. package/cpp/llama.cpp/ggml/src/ggml-rpc/ggml-rpc.cpp +18 -15
  85. package/cpp/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +1 -1
  86. package/cpp/llama.cpp/ggml/src/ggml-sycl/common.hpp +19 -24
  87. package/cpp/llama.cpp/ggml/src/ggml-sycl/convert.cpp +21 -2
  88. package/cpp/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +121 -4
  89. package/cpp/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +32 -0
  90. package/cpp/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +3 -0
  91. package/cpp/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +2 -96
  92. package/cpp/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +164 -38
  93. package/cpp/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +32 -8
  94. package/cpp/llama.cpp/ggml/src/ggml-sycl/quants.hpp +38 -10
  95. package/cpp/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +108 -16
  96. package/cpp/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +26 -29
  97. package/cpp/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +431 -247
  98. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +0 -12
  99. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/conv_transpose_1d.comp +98 -0
  100. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +2 -0
  101. package/cpp/llama.cpp/ggml/src/ggml.c +0 -6
  102. package/cpp/llama.cpp/gguf-py/gguf/constants.py +57 -0
  103. package/cpp/llama.cpp/gguf-py/gguf/gguf_writer.py +4 -1
  104. package/cpp/llama.cpp/gguf-py/gguf/tensor_mapping.py +14 -3
  105. package/cpp/llama.cpp/include/llama.h +134 -36
  106. package/cpp/llama.cpp/requirements/requirements-compare-llama-bench.txt +1 -0
  107. package/cpp/llama.cpp/src/CMakeLists.txt +2 -2
  108. package/cpp/llama.cpp/src/llama-arch.cpp +95 -3
  109. package/cpp/llama.cpp/src/llama-arch.h +7 -1
  110. package/cpp/llama.cpp/src/llama-batch.cpp +270 -19
  111. package/cpp/llama.cpp/src/llama-batch.h +36 -11
  112. package/cpp/llama.cpp/src/llama-chat.cpp +19 -2
  113. package/cpp/llama.cpp/src/llama-chat.h +1 -0
  114. package/cpp/llama.cpp/src/llama-context.cpp +313 -213
  115. package/cpp/llama.cpp/src/llama-context.h +16 -12
  116. package/cpp/llama.cpp/src/llama-cparams.cpp +1 -1
  117. package/cpp/llama.cpp/src/llama-cparams.h +1 -1
  118. package/cpp/llama.cpp/src/llama-graph.cpp +249 -129
  119. package/cpp/llama.cpp/src/llama-graph.h +90 -34
  120. package/cpp/llama.cpp/src/llama-hparams.cpp +6 -2
  121. package/cpp/llama.cpp/src/llama-hparams.h +8 -2
  122. package/cpp/llama.cpp/src/llama-kv-cache-unified-iswa.cpp +82 -50
  123. package/cpp/llama.cpp/src/llama-kv-cache-unified-iswa.h +23 -26
  124. package/cpp/llama.cpp/src/llama-kv-cache-unified.cpp +292 -174
  125. package/cpp/llama.cpp/src/llama-kv-cache-unified.h +68 -38
  126. package/cpp/llama.cpp/src/llama-kv-cells.h +18 -13
  127. package/cpp/llama.cpp/src/llama-memory-hybrid.cpp +247 -0
  128. package/cpp/llama.cpp/src/llama-memory-hybrid.h +143 -0
  129. package/cpp/llama.cpp/src/{llama-kv-cache-recurrent.cpp → llama-memory-recurrent.cpp} +266 -282
  130. package/cpp/llama.cpp/src/{llama-kv-cache-recurrent.h → llama-memory-recurrent.h} +54 -57
  131. package/cpp/llama.cpp/src/llama-memory.cpp +41 -0
  132. package/cpp/llama.cpp/src/llama-memory.h +64 -23
  133. package/cpp/llama.cpp/src/llama-mmap.cpp +1 -1
  134. package/cpp/llama.cpp/src/llama-model-loader.cpp +42 -17
  135. package/cpp/llama.cpp/src/llama-model.cpp +726 -141
  136. package/cpp/llama.cpp/src/llama-model.h +4 -0
  137. package/cpp/llama.cpp/src/llama-quant.cpp +2 -1
  138. package/cpp/llama.cpp/src/llama-vocab.cpp +32 -23
  139. package/cpp/llama.cpp/src/llama.cpp +11 -7
  140. package/cpp/llama.cpp/src/unicode.cpp +5 -0
  141. package/cpp/rn-completion.cpp +2 -2
  142. package/cpp/{rn-llama.hpp → rn-llama.h} +1 -1
  143. package/ios/include/chat.h +1 -1
  144. package/ios/include/common.h +5 -2
  145. package/ios/include/llama.h +134 -36
  146. package/ios/libs/llama.xcframework/Info.plist +18 -18
  147. package/ios/libs/llama.xcframework/ios-arm64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  148. package/ios/libs/llama.xcframework/ios-arm64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4863 -4689
  149. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/llama.h +134 -36
  150. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/llama +0 -0
  151. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  152. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4834 -4710
  153. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3742 -3622
  154. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/llama.h +134 -36
  155. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/llama +0 -0
  156. package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  157. package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4834 -4710
  158. package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3744 -3624
  159. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/llama.h +134 -36
  160. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/llama.h +134 -36
  161. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/llama +0 -0
  162. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/llama.h +134 -36
  163. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/llama +0 -0
  164. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/llama +0 -0
  165. package/ios/libs/llama.xcframework/tvos-arm64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  166. package/ios/libs/llama.xcframework/tvos-arm64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4863 -4689
  167. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/llama.h +134 -36
  168. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/llama +0 -0
  169. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  170. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4834 -4710
  171. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3742 -3622
  172. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/llama.h +134 -36
  173. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/llama +0 -0
  174. package/ios/libs/llama.xcframework/xros-arm64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  175. package/ios/libs/llama.xcframework/xros-arm64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4900 -4725
  176. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/llama.h +134 -36
  177. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/llama +0 -0
  178. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  179. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4871 -4746
  180. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3773 -3652
  181. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/llama.h +134 -36
  182. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/llama +0 -0
  183. package/package.json +1 -2
  184. package/cpp/llama.cpp/common/cmake/build-info-gen-cpp.cmake +0 -24
  185. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +0 -8
  186. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +0 -13891
  187. package/cpp/llama.cpp/src/llama-kv-cache.cpp +0 -1
  188. package/cpp/llama.cpp/src/llama-kv-cache.h +0 -44
  189. /package/cpp/llama.cpp/ggml/src/ggml-cpu/{cpu-feats-x86.cpp → arch/x86/cpu-feats.cpp} +0 -0
  190. /package/cpp/llama.cpp/ggml/src/ggml-cpu/{ggml-cpu-hbm.h → hbm.h} +0 -0
  191. /package/cpp/llama.cpp/ggml/src/ggml-cpu/{ggml-cpu-traits.h → traits.h} +0 -0
  192. /package/cpp/{rn-utils.hpp → rn-utils.h} +0 -0
package/cpp/llama.cpp/src/{llama-kv-cache-recurrent.h → llama-memory-recurrent.h}
@@ -2,34 +2,48 @@
 
 #include "llama-batch.h"
 #include "llama-graph.h"
-#include "llama-kv-cache.h"
+#include "llama-memory.h"
 
 #include <set>
 #include <vector>
 
 //
-// llama_kv_cache_recurrent
+// llama_memory_recurrent
 //
 
-// TODO: extract the KV cache state used for graph computation into llama_kv_cache_recurrent_state_i
+// TODO: extract the cache state used for graph computation into llama_memory_recurrent_state_i
 // see the implementation of llama_kv_cache_unified_state_i for an example how to do it
-class llama_kv_cache_recurrent : public llama_kv_cache {
+class llama_memory_recurrent : public llama_memory_i {
 public:
-    llama_kv_cache_recurrent(
-            const llama_model & model,
-                    ggml_type   type_k,
-                    ggml_type   type_v,
-                         bool   offload,
-                     uint32_t   kv_size,
-                     uint32_t   n_seq_max);
 
-    ~llama_kv_cache_recurrent() = default;
+    // this callback is used to filter out layers that should not be included in the cache
+    using layer_filter_cb = std::function<bool(int32_t il)>;
+
+    llama_memory_recurrent(
+            const llama_model & model,
+              layer_filter_cb && filter,
+                    ggml_type   type_r,
+                    ggml_type   type_s,
+                         bool   offload,
+                     uint32_t   mem_size,
+                     uint32_t   n_seq_max);
+
+    ~llama_memory_recurrent() = default;
 
     //
     // llama_memory_i
     //
 
-    void clear() override;
+    llama_memory_state_ptr init_batch(
+            const llama_batch & batch,
+            uint32_t n_ubatch,
+            bool embd_all) override;
+
+    llama_memory_state_ptr init_full() override;
+
+    llama_memory_state_ptr init_update(llama_context * lctx, bool optimize) override;
+
+    void clear(bool data) override;
 
     bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
     void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
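The new layer_filter_cb lets the constructor skip layers that keep no recurrent state (e.g. the attention layers of a hybrid model). A minimal construction sketch, assuming a hypothetical hparams.is_recurrent(il) predicate and F32 cell types; the real call sites live in llama-model.cpp:

    // sketch only: build a recurrent memory that covers recurrent layers only
    auto mem = std::make_unique<llama_memory_recurrent>(
            model,
            /*filter   =*/ [&](int32_t il) { return hparams.is_recurrent(il); }, // hypothetical predicate
            /*type_r   =*/ GGML_TYPE_F32,
            /*type_s   =*/ GGML_TYPE_F32,
            /*offload  =*/ true,
            /*mem_size =*/ n_seq_max, // recurrent caches hold one cell per sequence
            /*n_seq_max=*/ n_seq_max);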
@@ -40,33 +54,13 @@ public:
     llama_pos seq_pos_min(llama_seq_id seq_id) const override;
     llama_pos seq_pos_max(llama_seq_id seq_id) const override;
 
-    //
-    // llama_kv_cache
-    //
-
-    llama_memory_state_ptr init_batch(
-            const llama_batch & batch,
-            uint32_t n_ubatch,
-            bool embd_pooled,
-            bool logits_all) override;
-
-    llama_memory_state_ptr init_full() override;
-
-    bool update(llama_context & lctx) override;
-
-    void defrag_sched(float thold) override;
-
     bool prepare(const std::vector<llama_ubatch> & ubatches);
 
-    // find a contiguous slot of kv cells and emplace the ubatch there
+    // find a contiguous slot of memory cells and emplace the ubatch there
     bool find_slot(const llama_ubatch & ubatch);
 
     bool get_can_shift() const override;
 
-    // TODO: temporary methods - they are not really const as they do const_cast<>, fix this
-    int32_t s_copy(int i) const;
-    float   s_mask(int i) const;
-
     // state write/load
 
     void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
@@ -79,10 +73,14 @@ public:
     // computed before each graph build
     uint32_t n = 0;
 
+    // first zero-ed state
+    int32_t rs_z = -1;
+
     // TODO: optimize for recurrent state needs
-    struct kv_cell {
+    struct mem_cell {
         llama_pos pos = -1;
-        int32_t   src = -1; // used to copy states
+        int32_t   src  = -1; // used to know where states should be copied from
+        int32_t   src0 = -1; // like src, but only used when setting the inputs (allowing to copy once)
         int32_t   tail = -1;
 
         std::set<llama_seq_id> seq_id;
@@ -95,15 +93,16 @@ public:
             return seq_id.empty();
         }
 
-        bool is_same_seq(const kv_cell & other) const {
+        bool is_same_seq(const mem_cell & other) const {
            return seq_id == other.seq_id;
        }
    };
 
-    std::vector<kv_cell> cells;
+    std::vector<mem_cell> cells;
 
-    std::vector<ggml_tensor *> k_l; // per layer
-    std::vector<ggml_tensor *> v_l;
+    // per layer
+    std::vector<ggml_tensor *> r_l;
+    std::vector<ggml_tensor *> s_l;
 
 private:
     //const llama_model & model;
@@ -116,8 +115,8 @@ private:
 
     size_t total_size() const;
 
-    size_t size_k_bytes() const;
-    size_t size_v_bytes() const;
+    size_t size_r_bytes() const;
+    size_t size_s_bytes() const;
 
     void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
     void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
@@ -126,24 +125,22 @@ private:
     bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
 };
 
-class llama_kv_cache_recurrent_state : public llama_memory_state_i {
+class llama_memory_recurrent_state : public llama_memory_state_i {
 public:
     // used for errors
-    llama_kv_cache_recurrent_state(llama_memory_status status);
+    llama_memory_recurrent_state(llama_memory_status status);
 
     // used to create a full-cache state
-    llama_kv_cache_recurrent_state(
-            llama_memory_status status,
-            llama_kv_cache_recurrent * kv);
+    llama_memory_recurrent_state(
+            llama_memory_recurrent * mem);
 
     // used to create a state from a batch
-    llama_kv_cache_recurrent_state(
-            llama_memory_status status,
-            llama_kv_cache_recurrent * kv,
+    llama_memory_recurrent_state(
+            llama_memory_recurrent * mem,
             llama_sbatch sbatch,
             std::vector<llama_ubatch> ubatches);
 
-    virtual ~llama_kv_cache_recurrent_state();
+    virtual ~llama_memory_recurrent_state();
 
     //
     // llama_memory_state_i
@@ -158,23 +155,23 @@ public:
     const llama_ubatch & get_ubatch() const override;
 
     //
-    // llama_kv_cache_recurrent_state specific API
+    // llama_memory_recurrent_state specific API
     //
 
-    uint32_t get_n_kv() const;
+    uint32_t get_n_rs() const;
     uint32_t get_head() const;
+    int32_t  get_rs_z() const;
     uint32_t get_size() const;
 
-    ggml_tensor * get_k_l(int32_t il) const;
-    ggml_tensor * get_v_l(int32_t il) const;
+    ggml_tensor * get_r_l(int32_t il) const;
+    ggml_tensor * get_s_l(int32_t il) const;
 
     int32_t s_copy(int i) const;
-    float s_mask(int i) const;
 
 private:
     const llama_memory_status status;
 
-    llama_kv_cache_recurrent * kv;
+    llama_memory_recurrent * mem;
 
     llama_sbatch sbatch;
 
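The renamed state object is now the read path for graph construction: get_r_l()/get_s_l() replace get_k_l()/get_v_l(), s_mask() is gone, and get_rs_z() exposes the first zeroed state. A hedged sketch of a consumer using only the methods declared above (the surrounding function and its role are illustrative, not the actual graph-build code):

    // sketch only: read per-layer recurrent tensors through the state object
    void build_layer_inputs(const llama_memory_recurrent_state & mstate, int32_t il) {
        const uint32_t n_rs = mstate.get_n_rs();  // number of states in the current slot
        ggml_tensor *  r    = mstate.get_r_l(il); // per-layer "r" tensor
        ggml_tensor *  s    = mstate.get_s_l(il); // per-layer "s" tensor
        for (uint32_t i = 0; i < n_rs; ++i) {
            const int32_t src = mstate.s_copy(i); // source cell to copy state i from
            // ... record the src -> i copy as a graph input ...
            (void) src;
        }
        (void) r; (void) s;
    }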
package/cpp/llama.cpp/src/llama-memory.cpp
@@ -1 +1,42 @@
 #include "llama-memory.h"
+
+llama_memory_status llama_memory_status_combine(llama_memory_status s0, llama_memory_status s1) {
+    bool has_update = false;
+
+    switch (s0) {
+        case LLAMA_MEMORY_STATUS_SUCCESS:
+            {
+                has_update = true;
+                break;
+            }
+        case LLAMA_MEMORY_STATUS_NO_UPDATE:
+            {
+                break;
+            }
+        case LLAMA_MEMORY_STATUS_FAILED_PREPARE:
+        case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
+            {
+                return s0;
+            }
+    }
+
+    switch (s1) {
+        case LLAMA_MEMORY_STATUS_SUCCESS:
+            {
+                has_update = true;
+                break;
+            }
+        case LLAMA_MEMORY_STATUS_NO_UPDATE:
+            {
+                break;
+            }
+        case LLAMA_MEMORY_STATUS_FAILED_PREPARE:
+        case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
+            {
+                return s1;
+            }
+    }
+
+    // if either status has an update, then the combined status has an update
+    return has_update ? LLAMA_MEMORY_STATUS_SUCCESS : LLAMA_MEMORY_STATUS_NO_UPDATE;
+}
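This helper gives composite memory types a single status for a pair of child states: either failure wins, otherwise one SUCCESS is enough to report SUCCESS. A minimal usage sketch, assuming hypothetical attn_state/recr_state children such as a hybrid type might hold:

    // sketch only: a hybrid memory state reporting a combined status
    llama_memory_status get_status_hybrid(
            const llama_memory_state_i & attn_state,
            const llama_memory_state_i & recr_state) {
        return llama_memory_status_combine(attn_state.get_status(), recr_state.get_status());
    }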
package/cpp/llama.cpp/src/llama-memory.h
@@ -7,6 +7,9 @@
 
 struct llama_ubatch;
 
+class llama_io_write_i;
+class llama_io_read_i;
+
 struct llama_memory_params {
     // kv cache
     ggml_type type_k;
@@ -16,32 +19,17 @@ struct llama_memory_params {
     bool swa_full;
 };
 
-// general concept of LLM memory
-// the KV cache is a type of LLM memory, but there can be other types
-class llama_memory_i {
-public:
-    virtual ~llama_memory_i() = default;
-
-    virtual void clear() = 0;
-
-    virtual bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0;
-    virtual void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0;
-    virtual void seq_keep(llama_seq_id seq_id) = 0;
-    virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) = 0;
-    virtual void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) = 0;
-
-    virtual llama_pos seq_pos_min(llama_seq_id seq_id) const = 0;
-    virtual llama_pos seq_pos_max(llama_seq_id seq_id) const = 0;
-
-    virtual bool get_can_edit() const = 0;
-};
-
 enum llama_memory_status {
     LLAMA_MEMORY_STATUS_SUCCESS = 0,
+    LLAMA_MEMORY_STATUS_NO_UPDATE,
     LLAMA_MEMORY_STATUS_FAILED_PREPARE,
     LLAMA_MEMORY_STATUS_FAILED_COMPUTE,
 };
 
+// helper function for combining the status of two memory states
+// useful for implementing hybrid memory types (e.g. iSWA)
+llama_memory_status llama_memory_status_combine(llama_memory_status s0, llama_memory_status s1);
+
 // the interface for managing the memory state during batch processing
 // this interface is implemented per memory type. see:
 //   - llama_kv_cache_unified_state
@@ -51,8 +39,7 @@ enum llama_memory_status {
 // the only method that can mutate the memory and the memory state is llama_memory_i::apply()
 //
 // TODO: rename to llama_memory_context_i ?
-class llama_memory_state_i {
-public:
+struct llama_memory_state_i {
     virtual ~llama_memory_state_i() = default;
 
     // consume the current ubatch from the state and proceed to the next one
@@ -69,8 +56,62 @@ public:
     // get the current ubatch
     virtual const llama_ubatch & get_ubatch() const = 0;
 
-    // get the status of the memory state
+    // get the status of the memory state - used for error handling and checking if any updates would be applied
     virtual llama_memory_status get_status() const = 0;
 };
 
 using llama_memory_state_ptr = std::unique_ptr<llama_memory_state_i>;
+
+// general concept of LLM memory
+// the KV cache is a type of LLM memory, but there can be other types
+struct llama_memory_i {
+    virtual ~llama_memory_i() = default;
+
+    // split the input batch into a set of ubatches and verify that they can fit into the cache
+    // return a state object containing the ubatches and KV cache state required to process them
+    // check the llama_memory_state_i::get_status() for the result
+    virtual llama_memory_state_ptr init_batch(
+            const llama_batch & batch,
+            uint32_t n_ubatch,
+            bool embd_all) = 0;
+
+    // simulate full cache, used for allocating worst-case compute buffers
+    virtual llama_memory_state_ptr init_full() = 0;
+
+    // prepare for any pending memory updates, such as shifts, defrags, etc.
+    // status == LLAMA_MEMORY_STATUS_NO_UPDATE if there is nothing to update
+    virtual llama_memory_state_ptr init_update(llama_context * lctx, bool optimize) = 0;
+
+    // getters
+    virtual bool get_can_shift() const = 0;
+
+    //
+    // ops
+    //
+
+    // if data == true, the data buffers will also be cleared together with the metadata
+    virtual void clear(bool data) = 0;
+
+    virtual bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0;
+    virtual void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0;
+    virtual void seq_keep(llama_seq_id seq_id) = 0;
+    virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) = 0;
+    virtual void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) = 0;
+
+    virtual llama_pos seq_pos_min(llama_seq_id seq_id) const = 0;
+    virtual llama_pos seq_pos_max(llama_seq_id seq_id) const = 0;
+
+    //
+    // state write/read
+    //
+
+    virtual void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const = 0;
+    virtual void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) = 0;
+};
+
+using llama_memory_ptr = std::unique_ptr<llama_memory_i>;
+
+// TODO: temporary until the llama_kv_cache is removed from the public API
+struct llama_kv_cache : public llama_memory_i {
+    virtual ~llama_kv_cache() = default;
+};
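With init_batch(), init_full() and init_update() now on llama_memory_i itself, every memory type is driven the same way: request a state, check its status, then step through the ubatches. A minimal driver sketch, assuming the state's next() returns false after the last ubatch and apply() reserves cells for the current one (the real loop lives in llama-context.cpp):

    // sketch only: drive one batch through any llama_memory_i implementation
    bool process_batch(llama_memory_i & mem, const llama_batch & batch, uint32_t n_ubatch) {
        llama_memory_state_ptr st = mem.init_batch(batch, n_ubatch, /*embd_all=*/false);
        if (st->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) {
            return false; // the batch cannot fit - nothing has been mutated yet
        }
        do {
            st->apply();                               // the only call allowed to mutate the memory
            const llama_ubatch & ub = st->get_ubatch();
            // ... build and compute the graph for ub ...
            (void) ub;
        } while (st->next());                          // consume and advance
        return true;
    }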
package/cpp/llama.cpp/src/llama-mmap.cpp
@@ -401,7 +401,7 @@ struct llama_mmap::impl {
            }
        }
 #else
-            throw std::runtime_error("PrefetchVirtualMemory unavailable");
+            LLAMA_LOG_DEBUG("skipping PrefetchVirtualMemory because _WIN32_WINNT < 0x602\n");
 #endif
        }
    }
package/cpp/llama.cpp/src/llama-model-loader.cpp
@@ -288,9 +288,10 @@ namespace GGUFMeta {
 
    template<typename T>
    bool llama_model_loader::get_arr(const std::string & key, std::vector<T> & result, bool required) {
-        const int kid = gguf_find_key(meta.get(), key.c_str());
+        const gguf_context * ctx = meta.get();
+        const int kid = gguf_find_key(ctx, key.c_str());
 
-        if (kid < 0 || gguf_get_kv_type(meta.get(), kid) != GGUF_TYPE_ARRAY) {
+        if (kid < 0 || gguf_get_kv_type(ctx, kid) != GGUF_TYPE_ARRAY) {
            if (required) {
                throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
            }
@@ -298,28 +299,40 @@ namespace GGUFMeta {
        }
 
        struct GGUFMeta::ArrayInfo arr_info =
-            GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
+            GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(ctx, kid);
 
        switch (arr_info.gt) {
            case GGUF_TYPE_UINT32:
-            case GGUF_TYPE_INT32:   GGML_ASSERT((std::is_same<T,  int32_t>::value) ||
-                                                (std::is_same<T, uint32_t>::value)); break;
-            case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value)); break;
+            case GGUF_TYPE_INT32:   GGML_ASSERT((std::is_same<T,  int32_t>::value) ||
+                                                (std::is_same<T, uint32_t>::value)); break;
+            case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value)); break;
+            case GGUF_TYPE_STRING:  GGML_ASSERT((std::is_same<T, std::string>::value)); break;
            default:
-                throw std::runtime_error(format("%s is not a float32/uint32/int32 array", key.c_str()));
+                throw std::runtime_error(format("%s is not a string/float32/uint32/int32 array", key.c_str()));
        }
 
-        result.resize(arr_info.length);
-        result.assign((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length);
+        if constexpr (std::is_same<T, std::string>::value) {
+            const size_t n_items = gguf_get_arr_n(ctx, kid);
+            result.clear();
+
+            for (size_t i = 0; i < n_items; i++) {
+                const T value = gguf_get_arr_str(ctx, kid, i);
+                result.emplace_back(value);
+            }
+        } else {
+            result.resize(arr_info.length);
+            result.assign((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length);
+        }
 
        return true;
    }
 
    template<typename T, size_t N_MAX>
    bool llama_model_loader::get_arr(const std::string & key, std::array<T, N_MAX> & result, bool required) {
-        const int kid = gguf_find_key(meta.get(), key.c_str());
+        const gguf_context * ctx = meta.get();
+        const int kid = gguf_find_key(ctx, key.c_str());
 
-        if (kid < 0 || gguf_get_kv_type(meta.get(), kid) != GGUF_TYPE_ARRAY) {
+        if (kid < 0 || gguf_get_kv_type(ctx, kid) != GGUF_TYPE_ARRAY) {
            if (required) {
                throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
            }
@@ -327,22 +340,32 @@ namespace GGUFMeta {
        }
 
        struct GGUFMeta::ArrayInfo arr_info =
-            GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
+            GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(ctx, kid);
 
        switch (arr_info.gt) {
            case GGUF_TYPE_UINT32:
-            case GGUF_TYPE_INT32:   GGML_ASSERT((std::is_same<T,  int32_t>::value) ||
-                                                (std::is_same<T, uint32_t>::value)); break;
-            case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value)); break;
+            case GGUF_TYPE_INT32:   GGML_ASSERT((std::is_same<T,  int32_t>::value) ||
+                                                (std::is_same<T, uint32_t>::value)); break;
+            case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value)); break;
+            case GGUF_TYPE_STRING:  GGML_ASSERT((std::is_same<T, std::string>::value)); break;
            default:
-                throw std::runtime_error(format("%s is not a float32/uint32/int32 array", key.c_str()));
+                throw std::runtime_error(format("%s is not a string/float32/uint32/int32 array", key.c_str()));
        }
 
        if (arr_info.length > N_MAX) {
            throw std::runtime_error(format("array length %u for key %s exceeds max %u", (uint32_t) arr_info.length, key.c_str(), (uint32_t) N_MAX));
        }
 
-        std::copy((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length, result.begin());
+        if constexpr (std::is_same<T, std::string>::value) {
+            const size_t n_items = gguf_get_arr_n(ctx, kid);
+
+            for (size_t i = 0; i < n_items; i++) {
+                const T value = gguf_get_arr_str(ctx, kid, i);
+                result[i] = value;
+            }
+        } else {
+            std::copy((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length, result.begin());
+        }
 
        return true;
    }
@@ -352,6 +375,8 @@ namespace GGUFMeta {
        return get_arr(llm_kv(kid), result, required);
    }
 
+    template bool llama_model_loader::get_arr<std::vector<std::string>>(enum llm_kv kid, std::vector<std::string> & result, bool required);
+
    template<typename T>
    bool llama_model_loader::get_key(const std::string & key, T & result, bool required) {
        auto it = kv_overrides.find(key);