cui-llama.rn 1.6.1 → 1.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. package/android/src/main/CMakeLists.txt +6 -0
  2. package/android/src/main/java/com/rnllama/LlamaContext.java +38 -5
  3. package/android/src/main/java/com/rnllama/RNLlama.java +139 -4
  4. package/android/src/main/jni.cpp +153 -14
  5. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  9. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  10. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  11. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  12. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  13. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +24 -4
  14. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +22 -2
  15. package/cpp/chat.cpp +128 -106
  16. package/cpp/chat.h +2 -0
  17. package/cpp/common.cpp +41 -76
  18. package/cpp/common.h +23 -19
  19. package/cpp/ggml-backend.cpp +9 -5
  20. package/cpp/ggml-backend.h +4 -4
  21. package/cpp/ggml-cpu/ggml-cpu-aarch64.cpp +0 -2
  22. package/cpp/ggml-cpu/ggml-cpu-quants.c +306 -6
  23. package/cpp/ggml-cpu/ggml-cpu.c +5 -13
  24. package/cpp/ggml-cpu/ggml-cpu.cpp +29 -16
  25. package/cpp/ggml-cpu/ops.cpp +107 -13
  26. package/cpp/ggml-cpu/vec.cpp +0 -6
  27. package/cpp/ggml-cpu/vec.h +16 -0
  28. package/cpp/ggml-llama-sim.metallib +0 -0
  29. package/cpp/ggml-llama.metallib +0 -0
  30. package/cpp/ggml-metal-impl.h +36 -11
  31. package/cpp/ggml-metal.m +321 -132
  32. package/cpp/ggml-opt.cpp +373 -190
  33. package/cpp/ggml-opt.h +49 -28
  34. package/cpp/ggml-quants.c +0 -6
  35. package/cpp/ggml.c +93 -38
  36. package/cpp/ggml.h +21 -7
  37. package/cpp/gguf.cpp +33 -33
  38. package/cpp/llama-adapter.cpp +6 -0
  39. package/cpp/llama-arch.cpp +3 -0
  40. package/cpp/llama-batch.cpp +3 -1
  41. package/cpp/llama-chat.cpp +8 -6
  42. package/cpp/llama-chat.h +1 -0
  43. package/cpp/llama-context.cpp +349 -135
  44. package/cpp/llama-context.h +30 -3
  45. package/cpp/llama-cparams.h +1 -0
  46. package/cpp/llama-graph.cpp +150 -234
  47. package/cpp/llama-graph.h +52 -7
  48. package/cpp/llama-hparams.cpp +17 -1
  49. package/cpp/llama-hparams.h +34 -5
  50. package/cpp/llama-kv-cache.cpp +662 -321
  51. package/cpp/llama-kv-cache.h +203 -93
  52. package/cpp/llama-memory.h +3 -2
  53. package/cpp/llama-model-loader.cpp +24 -15
  54. package/cpp/llama-model-saver.cpp +281 -0
  55. package/cpp/llama-model-saver.h +37 -0
  56. package/cpp/llama-model.cpp +536 -132
  57. package/cpp/llama-model.h +7 -1
  58. package/cpp/llama-sampling.cpp +18 -6
  59. package/cpp/llama-vocab.cpp +46 -8
  60. package/cpp/llama-vocab.h +6 -0
  61. package/cpp/llama.cpp +14 -0
  62. package/cpp/llama.h +72 -131
  63. package/cpp/minja/chat-template.hpp +9 -5
  64. package/cpp/minja/minja.hpp +69 -36
  65. package/cpp/rn-llama.cpp +611 -47
  66. package/cpp/rn-llama.h +33 -3
  67. package/cpp/sampling.cpp +57 -50
  68. package/cpp/tools/mtmd/clip-impl.h +462 -0
  69. package/cpp/tools/mtmd/clip.cpp +4024 -0
  70. package/cpp/tools/mtmd/clip.h +101 -0
  71. package/cpp/tools/mtmd/miniaudio.h +93468 -0
  72. package/cpp/tools/mtmd/mtmd-audio.cpp +855 -0
  73. package/cpp/tools/mtmd/mtmd-audio.h +62 -0
  74. package/cpp/tools/mtmd/mtmd-helper.cpp +297 -0
  75. package/cpp/tools/mtmd/mtmd.cpp +942 -0
  76. package/cpp/tools/mtmd/mtmd.h +362 -0
  77. package/cpp/tools/mtmd/stb_image.h +7988 -0
  78. package/ios/CMakeLists.txt +7 -0
  79. package/ios/RNLlama.mm +77 -3
  80. package/ios/RNLlamaContext.h +5 -1
  81. package/ios/RNLlamaContext.mm +105 -10
  82. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +2 -0
  83. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +23 -19
  84. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
  85. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  86. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
  87. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +21 -7
  88. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
  89. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +30 -3
  90. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
  91. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +52 -7
  92. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +34 -5
  93. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  94. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +3 -2
  95. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
  96. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +7 -1
  97. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
  98. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +72 -131
  99. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  100. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
  101. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +33 -3
  102. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
  103. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  104. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
  105. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
  106. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +23 -19
  107. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
  108. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  109. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
  110. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +21 -7
  111. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
  112. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +30 -3
  113. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
  114. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +52 -7
  115. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +34 -5
  116. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  117. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +3 -2
  118. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
  119. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +7 -1
  120. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
  121. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +72 -131
  122. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  123. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
  124. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +33 -3
  125. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  126. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
  127. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  128. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  129. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +2 -0
  130. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +23 -19
  131. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
  132. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  133. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
  134. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +21 -7
  135. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
  136. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +30 -3
  137. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
  138. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +52 -7
  139. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +34 -5
  140. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  141. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +3 -2
  142. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
  143. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +7 -1
  144. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
  145. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +72 -131
  146. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  147. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
  148. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +33 -3
  149. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
  150. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  151. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
  152. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
  153. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +23 -19
  154. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
  155. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  156. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
  157. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +21 -7
  158. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
  159. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +30 -3
  160. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
  161. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +52 -7
  162. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +34 -5
  163. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  164. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +3 -2
  165. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
  166. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +7 -1
  167. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
  168. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +72 -131
  169. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  170. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
  171. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +33 -3
  172. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  173. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
  174. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  175. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  176. package/jest/mock.js +33 -7
  177. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  178. package/lib/commonjs/index.js +153 -21
  179. package/lib/commonjs/index.js.map +1 -1
  180. package/lib/module/NativeRNLlama.js.map +1 -1
  181. package/lib/module/index.js +152 -20
  182. package/lib/module/index.js.map +1 -1
  183. package/lib/typescript/NativeRNLlama.d.ts +50 -4
  184. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  185. package/lib/typescript/index.d.ts +72 -6
  186. package/lib/typescript/index.d.ts.map +1 -1
  187. package/package.json +1 -1
  188. package/src/NativeRNLlama.ts +67 -4
  189. package/src/index.ts +212 -38
  190. package/lib/commonjs/chat.js +0 -37
  191. package/lib/commonjs/chat.js.map +0 -1
  192. package/lib/module/chat.js +0 -33
  193. package/lib/module/chat.js.map +0 -1
  194. package/lib/typescript/chat.d.ts +0 -10
  195. package/lib/typescript/chat.d.ts.map +0 -1
  196. package/src/chat.ts +0 -44
package/cpp/llama-kv-cache.h

@@ -8,6 +8,7 @@
 #include "ggml-cpp.h"

 #include <set>
+#include <unordered_map>
 #include <vector>

 struct llama_cparams;
@@ -40,6 +41,9 @@ struct llama_kv_cache : public llama_memory_i {
     // batch processing
     //

+    // =============================================================================================================
+    // TODO: refactor and simplify this
+
     virtual llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) = 0;

     // different KV caches require different batch splitting strategies
@@ -48,11 +52,10 @@ struct llama_kv_cache : public llama_memory_i {
     // find an empty slot of size "n_tokens" in the cache
     virtual bool find_slot(const llama_ubatch & batch) = 0;

+    // =============================================================================================================
+
     // getters
-    virtual int32_t get_n_tokens() const = 0;
-    virtual int32_t get_used_cells() const = 0; // TODO: remove, this is too-specific to the unified cache
-    virtual llama_pos get_pos_max() const = 0;
-    virtual bool get_can_shift() const = 0;
+    virtual bool get_can_shift() const = 0;

     bool get_can_edit() const override { return get_can_shift(); }

@@ -87,38 +90,25 @@ private:
 // llama_kv_cache_unified
 //

-// TODO: add notion of max sequences
 class llama_kv_cache_unified : public llama_kv_cache {
 public:
-    struct kv_cell {
-        llama_pos pos = -1;
-        llama_pos delta = 0;
-
-        std::set<llama_seq_id> seq_id;
-
-        bool has_seq_id(const llama_seq_id & id) const {
-            return seq_id.find(id) != seq_id.end();
-        }
-
-        bool is_empty() const {
-            return seq_id.empty();
-        }
-
-        bool is_same_seq(const kv_cell & other) const {
-            return seq_id == other.seq_id;
-        }
-    };
-
     static uint32_t get_padding(const llama_cparams & cparams);

+    // this callback is used to filter out layers that should not be included in the cache
+    using layer_filter_cb = std::function<bool(int32_t il)>;
+
     llama_kv_cache_unified(
-            const llama_model & model,
-            lm_ggml_type type_k,
-            lm_ggml_type type_v,
-            bool v_trans,
-            bool offload,
-            uint32_t kv_size,
-            uint32_t padding);
+            const llama_model & model,
+            layer_filter_cb && filter,
+            lm_ggml_type type_k,
+            lm_ggml_type type_v,
+            bool v_trans,
+            bool offload,
+            uint32_t kv_size,
+            uint32_t n_seq_max,
+            uint32_t n_pad,
+            uint32_t n_swa,
+            llama_swa_type swa_type);

     ~llama_kv_cache_unified() = default;

@@ -130,10 +120,11 @@ public:

     bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
     void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
-    void seq_keep(llama_seq_id seq_id) override;
+    void seq_keep(llama_seq_id seq_id) override;
     void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
     void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;

+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
     llama_pos seq_pos_max(llama_seq_id seq_id) const override;

     //
@@ -150,7 +141,6 @@ public:
     void set_full() override;

     llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
-
     llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;

     // updates the cache head
@@ -158,53 +148,106 @@ public:
     // to the first cell of the slot.
     bool find_slot(const llama_ubatch & batch) override;

-    int32_t get_n_tokens() const override;
-    int32_t get_used_cells() const override;
-
-    // TODO: better data structures to reduce the cost of this operation
-    llama_pos get_pos_max() const override;
-
     bool get_can_shift() const override;

     // state write/load

     void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
-    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;

-    // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_impl also uses it, so it
-    // cannot be freely changed after a slot has been allocated.
-    uint32_t head = 0;
-    uint32_t size = 0;
-    uint32_t used = 0; // used cells (i.e. at least one seq_id)
+    //
+    // llama_kv_cache_unified specific API
+    //

-    // computed before each graph build
-    uint32_t n = 0;
+    uint32_t get_n() const;
+    uint32_t get_size() const;

-    std::vector<kv_cell> cells;
+    // get views of the current state of the cache
+    lm_ggml_tensor * get_k(lm_ggml_context * ctx, int32_t il) const;
+    lm_ggml_tensor * get_v(lm_ggml_context * ctx, int32_t il) const;

-    std::vector<lm_ggml_tensor *> k_l; // per layer
-    std::vector<lm_ggml_tensor *> v_l;
+    // store k_cur and v_cur in the cache based on the current head location
+    lm_ggml_tensor * cpy_k(lm_ggml_context * ctx, lm_ggml_tensor * k_cur, int32_t il) const;
+    lm_ggml_tensor * cpy_v(lm_ggml_context * ctx, lm_ggml_tensor * v_cur, int32_t il) const;
+
+    void prune_swa(llama_seq_id seq_id, llama_pos pmin, llama_pos pmax);
+
+    void set_input_kq_mask (lm_ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
+    void set_input_k_shift (lm_ggml_tensor * dst) const;
+    void set_input_pos_bucket(lm_ggml_tensor * dst, const llama_ubatch * ubatch) const;

 private:
     const llama_model & model;
     const llama_hparams & hparams;

+    struct kv_cell {
+        llama_pos pos = -1;
+        llama_pos delta = 0;
+
+        // TODO: replace with bitset uint64_t
+        std::set<llama_seq_id> seq_id;
+
+        bool has_seq_id(const llama_seq_id & id) const {
+            return seq_id.find(id) != seq_id.end();
+        }
+
+        bool is_empty() const {
+            return seq_id.empty();
+        }
+
+        bool is_same_seq(const kv_cell & other) const {
+            return seq_id == other.seq_id;
+        }
+    };
+
+    struct kv_layer {
+        // layer index in the model
+        // note: can be different from the layer index in the KV cache
+        uint32_t il;
+
+        lm_ggml_tensor * k;
+        lm_ggml_tensor * v;
+    };
+
     bool has_shift = false;
     bool do_defrag = false;
-
     bool v_trans = true; // the value tensor is transposed
-    bool can_shift = false;
+
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+    uint32_t size = 0; // total number of cells, shared across all sequences
+    uint32_t used = 0; // used cells (i.e. at least one seq_id) (TODO: add `struct kv_cells` and keep track automaticallt)
+
+    // computed before each graph build
+    uint32_t n = 0;
+
+    const uint32_t n_seq_max = 1;

     // required padding
-    uint32_t padding = 1;
+    const uint32_t n_pad = 1;
+
+    // SWA
+    const uint32_t n_swa = 0;

-    lm_ggml_type type_k = LM_GGML_TYPE_F16;
-    lm_ggml_type type_v = LM_GGML_TYPE_F16;
+    const llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;

     std::vector<lm_ggml_context_ptr> ctxs;
     std::vector<lm_ggml_backend_buffer_ptr> bufs;

+    std::vector<kv_cell> cells; // TODO: replace with `struct kv_cells`
+    std::vector<kv_layer> layers;
+
+    // model layer id -> KV cache layer id
+    std::unordered_map<int32_t, int32_t> map_layer_ids;
+
+    // recovery information used to restore the KV cells to their original state in case of a failure
+    struct {
+        void clear() {
+            cells.clear();
+        }
+
+        std::unordered_map<uint32_t, kv_cell> cells;
+    } recovery;
+
     // defrag
     struct {
         std::vector<uint32_t> ids;
@@ -213,17 +256,6 @@ private:
     // return true if cells have been moved
     bool defrag_prepare(int32_t n_max_nodes);

-    // commit/restore cache
-    struct slot_range {
-        uint32_t c0 = 0; // note: these are cell indices, not sequence positions
-        uint32_t c1 = 0;
-    };
-
-    // pending cell updates that are not yet committed
-    struct {
-        std::vector<slot_range> ranges;
-    } pending;
-
     // find how many cells are currently in use
     uint32_t cell_max() const;

@@ -232,6 +264,8 @@ private:
     size_t size_k_bytes() const;
     size_t size_v_bytes() const;

+    bool is_masked_swa(llama_pos p0, llama_pos p1) const;
+
     lm_ggml_tensor * build_rope_shift(
             const llama_cparams & cparams,
             lm_ggml_context * ctx,
@@ -258,6 +292,100 @@ private:
     bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
 };

+//
+// llama_kv_cache_unified_iswa
+//
+
+// utilizes two instances of llama_kv_cache_unified
+// the first instance is for the non-SWA layers of the model and the second instance is for the SWA layers
+// upon successful commit, the SWA cache removes old tokens outside the n_swa window
+
+class llama_kv_cache_unified_iswa : public llama_kv_cache {
+public:
+    llama_kv_cache_unified_iswa(
+            const llama_model & model,
+            lm_ggml_type type_k,
+            lm_ggml_type type_v,
+            bool v_trans,
+            bool offload,
+            bool swa_full,
+            uint32_t kv_size,
+            uint32_t n_seq_max,
+            uint32_t n_batch,
+            uint32_t n_pad);
+
+    ~llama_kv_cache_unified_iswa() = default;
+
+    //
+    // llama_memory_i
+    //
+
+    void clear() override;
+
+    bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    //
+    // llama_kv_cache
+    //
+
+    void restore() override;
+    void commit() override;
+
+    bool update(llama_context & ctx) override;
+
+    void defrag_sched(float thold) override;
+
+    void set_full() override;
+
+    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
+
+    bool find_slot(const llama_ubatch & batch) override;
+
+    bool get_can_shift() const override;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+
+    //
+    // llama_kv_cache_unified_iswa specific API
+    //
+
+    llama_kv_cache_unified * get_kv_base() const;
+    llama_kv_cache_unified * get_kv_swa () const;
+
+private:
+    const llama_hparams & hparams;
+
+    bool do_prune = true;
+
+    struct {
+        struct entry {
+            llama_pos pmin;
+            llama_pos pmax;
+        };
+
+        void clear() {
+            pos.clear();
+        }
+
+        // used to perform SWA pruning of old tokens
+        std::unordered_map<llama_seq_id, entry> pos;
+    } pending;
+
+    std::unique_ptr<llama_kv_cache_unified> kv_base;
+    std::unique_ptr<llama_kv_cache_unified> kv_swa;
+};
+
 //
 // llama_kv_cache_recurrent
 //
@@ -289,7 +417,8 @@ public:
             lm_ggml_type type_k,
             lm_ggml_type type_v,
             bool offload,
-            uint32_t kv_size);
+            uint32_t kv_size,
+            uint32_t n_seq_max);

     ~llama_kv_cache_recurrent() = default;

@@ -301,10 +430,11 @@ public:

     bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
     void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
-    void seq_keep(llama_seq_id seq_id) override;
+    void seq_keep(llama_seq_id seq_id) override;
     void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
     void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;

+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
     llama_pos seq_pos_max(llama_seq_id seq_id) const override;

     //
@@ -314,24 +444,17 @@ public:
     void restore() override;
     void commit() override;

-    bool update(llama_context & lctx) override;
+    bool update(llama_context & ctx) override;

     void defrag_sched(float thold) override;

     void set_full() override;

     llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
-
     llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;

     bool find_slot(const llama_ubatch & batch) override;

-    int32_t get_n_tokens() const override;
-    int32_t get_used_cells() const override;
-
-    // TODO: better data structures to reduce the cost of this operation
-    llama_pos get_pos_max() const override;
-
     bool get_can_shift() const override;

     // TODO: temporary methods - they are not really const as they do const_cast<>, fix this
@@ -343,11 +466,8 @@ public:
     void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
     void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;

-    // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_impl also uses it, so it
-    // cannot be freely changed after a slot has been allocated.
-    uint32_t head = 0;
-    uint32_t size = 0;
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+    uint32_t size = 0; // total number of cells, shared across all sequences
     uint32_t used = 0; // used cells (i.e. at least one seq_id)

     // computed before each graph build
@@ -374,8 +494,7 @@ private:
         std::vector<slot_range> ranges;
     } pending;

-    lm_ggml_type type_k = LM_GGML_TYPE_F16;
-    lm_ggml_type type_v = LM_GGML_TYPE_F16;
+    const uint32_t n_seq_max = 1;

     std::vector<lm_ggml_context_ptr> ctxs;
     std::vector<lm_ggml_backend_buffer_ptr> bufs;
@@ -394,12 +513,3 @@ private:
     bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
     bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
 };
-
-
-//
-// kv cache view
-//
-
-llama_kv_cache_view llama_kv_cache_view_init(const llama_kv_cache & kv, int32_t n_seq_max);
-
-void llama_kv_cache_view_update(llama_kv_cache_view * view, const llama_kv_cache * kv);
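The unified-cache constructor now takes a layer filter plus explicit SWA parameters instead of a single `padding` argument. As a rough illustration only (this is not code from the package; the `hparams.is_swa(il)` helper and all literal values are assumptions), a caller that wants a cache instance covering only the non-SWA layers might write:

```cpp
// Hypothetical sketch of the new llama_kv_cache_unified constructor usage.
// Assumes a loaded `model` and its `hparams` are in scope; sizes are placeholders.
llama_kv_cache_unified kv_base(
    model,
    [&](int32_t il) { return !hparams.is_swa(il); }, // layer_filter_cb: keep only non-SWA layers (is_swa() assumed)
    LM_GGML_TYPE_F16,                                // type_k
    LM_GGML_TYPE_F16,                                // type_v
    /*v_trans  =*/ true,
    /*offload  =*/ true,
    /*kv_size  =*/ 4096,
    /*n_seq_max=*/ 1,
    /*n_pad    =*/ 32,
    /*n_swa    =*/ 0,                                // no sliding window for the base cache
    LLAMA_SWA_TYPE_NONE);
```

Per the header comment above, `llama_kv_cache_unified_iswa` wraps two such instances (one filtered to non-SWA layers, one to SWA layers) and prunes tokens outside the `n_swa` window on a successful commit.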
package/cpp/llama-memory.h

@@ -7,8 +7,8 @@ struct llama_memory_params {
     lm_ggml_type type_k;
     lm_ggml_type type_v;

-    // parameters for other types of memory
-    // ...
+    // use full-size SWA cache
+    bool swa_full;
 };

 // general concept of LLM memory
@@ -25,6 +25,7 @@ public:
     virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) = 0;
     virtual void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) = 0;

+    virtual llama_pos seq_pos_min(llama_seq_id seq_id) const = 0;
     virtual llama_pos seq_pos_max(llama_seq_id seq_id) const = 0;

     virtual bool get_can_edit() const = 0;
package/cpp/llama-model-saver.h (new file)

@@ -0,0 +1,37 @@
+#pragma once
+
+#include "llama.h"
+#include "llama-arch.h"
+
+#include <vector>
+
+struct llama_model_saver {
+    struct lm_gguf_context * lm_gguf_ctx = nullptr;
+    const struct llama_model & model;
+    const struct LLM_KV llm_kv;
+
+    llama_model_saver(const struct llama_model & model);
+    ~llama_model_saver();
+
+    void add_kv(enum llm_kv key, uint32_t value);
+    void add_kv(enum llm_kv key, int32_t value);
+    void add_kv(enum llm_kv key, float value);
+    void add_kv(enum llm_kv key, bool value);
+    void add_kv(enum llm_kv key, const char * value);
+
+    [[noreturn]]
+    void add_kv(enum llm_kv key, char value); // needed to make the template below compile
+
+    template <typename Container>
+    void add_kv(enum llm_kv key, const Container & value, bool per_layer = false);
+
+    void add_kv(enum llm_kv key, const std::vector<std::string> & value);
+
+    void add_tensor(const struct lm_ggml_tensor * tensor);
+
+    void add_kv_from_model();
+
+    void add_tensors_from_model();
+
+    void save(const std::string & path_model);
+};
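For orientation only, a usage sketch inferred from the declarations above (not code from the package): the saver appears to be driven by constructing it around a model, copying the KV metadata, registering the tensors, and writing the GGUF file.

```cpp
// Hypothetical usage of llama_model_saver, based solely on the interface declared above.
void save_model_to_gguf(const llama_model & model, const std::string & path_model) {
    llama_model_saver saver(model); // wraps an lm_gguf_context for the output
    saver.add_kv_from_model();      // hparams / tokenizer metadata -> GGUF KV pairs
    saver.add_tensors_from_model(); // register all weight tensors for serialization
    saver.save(path_model);         // write the GGUF file to disk
}
```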
package/cpp/llama-model.h

@@ -76,6 +76,7 @@ enum llm_type {
     LLM_TYPE_236B,
     LLM_TYPE_290B,
     LLM_TYPE_314B,
+    LLM_TYPE_405B,
     LLM_TYPE_671B,
     LLM_TYPE_SMALL,
     LLM_TYPE_MEDIUM,
@@ -95,6 +96,8 @@ enum llm_type {
     LLM_TYPE_235B_A22B,
 };

+std::string llama_rope_scaling_type_name(llama_rope_scaling_type rope_scaling_type);
+
 struct llama_layer_posnet {
     // resnet
     struct lm_ggml_tensor * norm1 = nullptr;
@@ -395,7 +398,10 @@ struct llama_model {

     const struct lm_ggml_tensor * get_tensor(const char * name) const;

-    lm_ggml_tensor * get_rope_factors(uint32_t n_ctx_per_seq, int il) const;
+    float get_rope_freq_base (const llama_cparams & cparams, int il) const;
+    float get_rope_freq_scale(const llama_cparams & cparams, int il) const;
+
+    lm_ggml_tensor * get_rope_factors(const llama_cparams & cparams, int il) const;

     // note: can mutate `cparams`
     // TODO: move this to new llm_arch_model_i interface
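The RoPE helpers now take the full `llama_cparams` and a layer index, so the frequency base and scale can be resolved per layer. A hedged sketch of how graph-building code might query them (the surrounding `model`, `cparams`, and `n_layer` variables are assumptions, not code from the package):

```cpp
// Hypothetical sketch: query per-layer RoPE parameters via the new accessors.
for (int il = 0; il < n_layer; ++il) {
    const float freq_base  = model.get_rope_freq_base (cparams, il);
    const float freq_scale = model.get_rope_freq_scale(cparams, il);

    lm_ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);

    // ... use freq_base / freq_scale / rope_factors when building the RoPE op for layer il
}
```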
package/cpp/llama-vocab.h

@@ -21,6 +21,9 @@ struct llama_vocab {

     void load(llama_model_loader & ml, const LLM_KV & kv);

+    std::string get_tokenizer_model() const;
+    std::string get_tokenizer_pre() const;
+
     enum llama_vocab_type get_type() const;
     enum llama_vocab_pre_type get_pre_type() const;

@@ -80,6 +83,9 @@ struct llama_vocab {
     int max_token_len() const;

     int find_bpe_rank(const std::string & token_left, const std::string & token_right) const;
+    std::vector<std::string> get_bpe_merges() const;
+
+    std::vector<char> get_precompiled_charsmap() const;

     int32_t tokenize(
             const char * text,
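The new vocab accessors expose tokenizer metadata of the kind a model saver would need when re-serializing a model. A hypothetical sketch of reading them (not from the package; the helper function and the example values are assumptions):

```cpp
// Hypothetical sketch: inspect the tokenizer metadata exposed by the new accessors.
#include <cstdio>
#include <string>
#include <vector>

void print_tokenizer_info(const llama_vocab & vocab) {
    const std::string tok_model = vocab.get_tokenizer_model(); // e.g. "gpt2" or "llama"
    const std::string tok_pre   = vocab.get_tokenizer_pre();   // pre-tokenizer identifier

    const std::vector<std::string> merges = vocab.get_bpe_merges();

    std::printf("tokenizer: %s (pre: %s), %zu BPE merges\n",
                tok_model.c_str(), tok_pre.c_str(), merges.size());
}
```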