cui-llama.rn 1.4.4 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (216)
  1. package/android/src/main/CMakeLists.txt +9 -2
  2. package/android/src/main/jni.cpp +54 -34
  3. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  4. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  5. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  9. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  10. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  11. package/cpp/binary-ops.cpp +158 -0
  12. package/cpp/binary-ops.h +16 -0
  13. package/cpp/chat.cpp +1769 -1085
  14. package/cpp/chat.h +143 -0
  15. package/cpp/common.cpp +1562 -1996
  16. package/cpp/common.h +677 -744
  17. package/cpp/cpu-common.h +72 -0
  18. package/cpp/ggml-alloc.c +1039 -1030
  19. package/cpp/ggml-alloc.h +1 -1
  20. package/cpp/ggml-backend-impl.h +255 -255
  21. package/cpp/ggml-backend-reg.cpp +586 -582
  22. package/cpp/ggml-backend.cpp +2004 -2002
  23. package/cpp/ggml-backend.h +354 -354
  24. package/cpp/ggml-common.h +1857 -1851
  25. package/cpp/ggml-cpp.h +39 -39
  26. package/cpp/ggml-cpu-aarch64.cpp +5725 -4247
  27. package/cpp/ggml-cpu-aarch64.h +8 -8
  28. package/cpp/ggml-cpu-impl.h +512 -380
  29. package/cpp/ggml-cpu-quants.c +13026 -11517
  30. package/cpp/ggml-cpu-traits.cpp +36 -36
  31. package/cpp/ggml-cpu-traits.h +38 -38
  32. package/cpp/ggml-cpu.c +3438 -14485
  33. package/cpp/ggml-cpu.cpp +655 -633
  34. package/cpp/ggml-cpu.h +138 -135
  35. package/cpp/ggml-impl.h +594 -567
  36. package/cpp/ggml-metal-impl.h +312 -3
  37. package/cpp/ggml-metal.h +66 -66
  38. package/cpp/ggml-metal.m +5360 -5002
  39. package/cpp/ggml-opt.cpp +854 -854
  40. package/cpp/ggml-opt.h +216 -216
  41. package/cpp/ggml-quants.c +5238 -5238
  42. package/cpp/ggml-threading.h +14 -14
  43. package/cpp/ggml.c +6618 -6524
  44. package/cpp/ggml.h +2222 -2194
  45. package/cpp/gguf.cpp +1330 -1329
  46. package/cpp/gguf.h +202 -202
  47. package/cpp/json-schema-to-grammar.cpp +1024 -1025
  48. package/cpp/json-schema-to-grammar.h +21 -22
  49. package/cpp/json.hpp +24766 -24766
  50. package/cpp/llama-adapter.cpp +382 -347
  51. package/cpp/llama-adapter.h +76 -74
  52. package/cpp/llama-arch.cpp +1714 -1492
  53. package/cpp/llama-arch.h +428 -402
  54. package/cpp/llama-batch.cpp +368 -368
  55. package/cpp/llama-batch.h +88 -88
  56. package/cpp/llama-chat.cpp +640 -587
  57. package/cpp/llama-chat.h +56 -53
  58. package/cpp/llama-context.cpp +2831 -1775
  59. package/cpp/llama-context.h +265 -128
  60. package/cpp/llama-cparams.cpp +1 -1
  61. package/cpp/llama-cparams.h +38 -37
  62. package/cpp/llama-cpp.h +30 -30
  63. package/cpp/llama-grammar.cpp +1219 -1219
  64. package/cpp/llama-grammar.h +173 -164
  65. package/cpp/llama-graph.cpp +1695 -0
  66. package/cpp/llama-graph.h +592 -0
  67. package/cpp/llama-hparams.cpp +79 -71
  68. package/cpp/llama-hparams.h +156 -139
  69. package/cpp/llama-impl.cpp +167 -167
  70. package/cpp/llama-impl.h +61 -61
  71. package/cpp/llama-io.cpp +15 -0
  72. package/cpp/llama-io.h +35 -0
  73. package/cpp/llama-kv-cache.cpp +1380 -718
  74. package/cpp/llama-kv-cache.h +213 -218
  75. package/cpp/llama-memory.cpp +1 -0
  76. package/cpp/llama-memory.h +21 -0
  77. package/cpp/llama-mmap.cpp +600 -590
  78. package/cpp/llama-mmap.h +68 -68
  79. package/cpp/llama-model-loader.cpp +1129 -1124
  80. package/cpp/llama-model-loader.h +169 -167
  81. package/cpp/llama-model.cpp +13080 -4023
  82. package/cpp/llama-model.h +409 -370
  83. package/cpp/llama-sampling.cpp +2563 -2525
  84. package/cpp/llama-sampling.h +32 -32
  85. package/cpp/llama-vocab.cpp +3295 -3252
  86. package/cpp/llama-vocab.h +125 -125
  87. package/cpp/llama.cpp +351 -10137
  88. package/cpp/llama.h +1434 -1340
  89. package/cpp/log.cpp +427 -423
  90. package/cpp/log.h +132 -132
  91. package/cpp/{chat-template.hpp → minja/chat-template.hpp} +537 -529
  92. package/cpp/{minja.hpp → minja/minja.hpp} +2941 -2883
  93. package/cpp/ops.cpp +8723 -0
  94. package/cpp/ops.h +128 -0
  95. package/cpp/rn-llama.cpp +45 -71
  96. package/cpp/rn-llama.h +3 -3
  97. package/cpp/sampling.cpp +573 -532
  98. package/cpp/sgemm.cpp +3043 -2598
  99. package/cpp/sgemm.h +14 -14
  100. package/cpp/simd-mappings.h +888 -0
  101. package/cpp/speculative.cpp +278 -277
  102. package/cpp/speculative.h +28 -28
  103. package/cpp/unary-ops.cpp +186 -0
  104. package/cpp/unary-ops.h +28 -0
  105. package/cpp/vec.cpp +258 -0
  106. package/cpp/vec.h +802 -0
  107. package/ios/CMakeLists.txt +5 -2
  108. package/ios/RNLlama.mm +2 -2
  109. package/ios/RNLlamaContext.mm +40 -24
  110. package/package.json +1 -1
  111. package/src/NativeRNLlama.ts +6 -4
  112. package/src/index.ts +3 -1
  113. package/android/src/main/build-arm64/CMakeCache.txt +0 -429
  114. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeCCompiler.cmake +0 -81
  115. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeCXXCompiler.cmake +0 -101
  116. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeDetermineCompilerABI_C.bin +0 -0
  117. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeDetermineCompilerABI_CXX.bin +0 -0
  118. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeSystem.cmake +0 -15
  119. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdC/CMakeCCompilerId.c +0 -904
  120. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdC/CMakeCCompilerId.o +0 -0
  121. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdCXX/CMakeCXXCompilerId.cpp +0 -919
  122. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdCXX/CMakeCXXCompilerId.o +0 -0
  123. package/android/src/main/build-arm64/CMakeFiles/CMakeConfigureLog.yaml +0 -431
  124. package/android/src/main/build-arm64/CMakeFiles/CMakeDirectoryInformation.cmake +0 -16
  125. package/android/src/main/build-arm64/CMakeFiles/Makefile.cmake +0 -165
  126. package/android/src/main/build-arm64/CMakeFiles/Makefile2 +0 -297
  127. package/android/src/main/build-arm64/CMakeFiles/Progress/1 +0 -1
  128. package/android/src/main/build-arm64/CMakeFiles/Progress/2 +0 -1
  129. package/android/src/main/build-arm64/CMakeFiles/Progress/3 +0 -1
  130. package/android/src/main/build-arm64/CMakeFiles/Progress/4 +0 -1
  131. package/android/src/main/build-arm64/CMakeFiles/Progress/5 +0 -1
  132. package/android/src/main/build-arm64/CMakeFiles/Progress/6 +0 -1
  133. package/android/src/main/build-arm64/CMakeFiles/Progress/count.txt +0 -1
  134. package/android/src/main/build-arm64/CMakeFiles/TargetDirectories.txt +0 -8
  135. package/android/src/main/build-arm64/CMakeFiles/cmake.check_cache +0 -1
  136. package/android/src/main/build-arm64/CMakeFiles/progress.marks +0 -1
  137. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-alloc.c.o +0 -0
  138. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-alloc.c.o.d +0 -58
  139. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend-reg.cpp.o +0 -0
  140. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend-reg.cpp.o.d +0 -756
  141. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend.cpp.o +0 -0
  142. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend.cpp.o.d +0 -709
  143. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-aarch64.cpp.o +0 -0
  144. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-aarch64.cpp.o.d +0 -714
  145. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-quants.c.o +0 -0
  146. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-quants.c.o.d +0 -62
  147. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-traits.cpp.o +0 -0
  148. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-traits.cpp.o.d +0 -708
  149. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.c.o +0 -0
  150. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.c.o.d +0 -113
  151. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.cpp.o +0 -0
  152. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.cpp.o.d +0 -713
  153. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-opt.cpp.o +0 -0
  154. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-opt.cpp.o.d +0 -763
  155. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-quants.c.o +0 -0
  156. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-quants.c.o.d +0 -61
  157. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-threading.cpp.o +0 -0
  158. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-threading.cpp.o.d +0 -707
  159. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml.c.o +0 -0
  160. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml.c.o.d +0 -104
  161. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/gguf.cpp.o +0 -0
  162. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/gguf.cpp.o.d +0 -714
  163. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/log.cpp.o +0 -0
  164. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/log.cpp.o.d +0 -723
  165. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/DependInfo.cmake +0 -62
  166. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/build.make +0 -722
  167. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/cmake_clean.cmake +0 -89
  168. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/compiler_depend.make +0 -2
  169. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/compiler_depend.ts +0 -2
  170. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/depend.make +0 -2
  171. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/flags.make +0 -17
  172. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/progress.make +0 -41
  173. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/DependInfo.cmake +0 -62
  174. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/build.make +0 -722
  175. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/cmake_clean.cmake +0 -89
  176. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/compiler_depend.make +0 -2
  177. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/compiler_depend.ts +0 -2
  178. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/depend.make +0 -2
  179. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/flags.make +0 -17
  180. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/progress.make +0 -41
  181. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/DependInfo.cmake +0 -62
  182. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/build.make +0 -722
  183. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/cmake_clean.cmake +0 -89
  184. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/compiler_depend.make +0 -2
  185. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/compiler_depend.ts +0 -2
  186. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/depend.make +0 -2
  187. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/flags.make +0 -17
  188. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/progress.make +0 -41
  189. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/DependInfo.cmake +0 -62
  190. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/build.make +0 -722
  191. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/cmake_clean.cmake +0 -89
  192. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/compiler_depend.make +0 -2
  193. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/compiler_depend.ts +0 -2
  194. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/depend.make +0 -2
  195. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/flags.make +0 -17
  196. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/progress.make +0 -41
  197. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/DependInfo.cmake +0 -62
  198. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/build.make +0 -722
  199. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/cmake_clean.cmake +0 -89
  200. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/compiler_depend.make +0 -2
  201. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/compiler_depend.ts +0 -2
  202. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/depend.make +0 -2
  203. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/flags.make +0 -17
  204. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/progress.make +0 -41
  205. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/DependInfo.cmake +0 -62
  206. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/build.make +0 -722
  207. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/cmake_clean.cmake +0 -89
  208. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/compiler_depend.make +0 -2
  209. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/compiler_depend.ts +0 -2
  210. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/depend.make +0 -2
  211. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/flags.make +0 -17
  212. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/progress.make +0 -41
  213. package/android/src/main/build-arm64/Makefile +0 -1862
  214. package/android/src/main/build-arm64/cmake_install.cmake +0 -66
  215. package/cpp/chat.hpp +0 -55
  216. package/cpp/rn-llama.hpp +0 -913
package/cpp/llama-kv-cache.h
@@ -1,218 +1,213 @@
-#pragma once
-
-#include "llama.h"
-
-#include "ggml-cpp.h"
-
-#include <set>
-#include <vector>
-
-struct llama_kv_cell {
-    llama_pos pos   = -1;
-    llama_pos delta =  0;
-    int32_t   src   = -1; // used by recurrent state models to copy states
-    int32_t   tail  = -1;
-
-    std::set<llama_seq_id> seq_id;
-
-    bool has_seq_id(const llama_seq_id & id) const {
-        return seq_id.find(id) != seq_id.end();
-    }
-
-    bool is_empty() const {
-        return seq_id.empty();
-    }
-
-    bool is_same_seq(const llama_kv_cell & other) const {
-        return seq_id == other.seq_id;
-    }
-};
-
-// ring-buffer of cached KV data
-struct llama_kv_cache {
-    bool has_shift = false;
-    bool do_defrag = false;
-    bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token
-    bool v_trans   = true;  // the value tensor is transposed
-    bool can_shift = false;
-
-    // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_impl also uses it, so it
-    // cannot be freely changed after a slot has been allocated.
-    uint32_t head = 0;
-    uint32_t size = 0;
-    uint32_t used = 0; // used cells (i.e. at least one seq_id)
-
-    // computed before each graph build
-    uint32_t n = 0;
-
-    lm_ggml_type type_k = LM_GGML_TYPE_F16;
-    lm_ggml_type type_v = LM_GGML_TYPE_F16;
-
-    std::vector<llama_kv_cell> cells;
-
-    std::vector<struct lm_ggml_tensor *> k_l; // per layer
-    std::vector<struct lm_ggml_tensor *> v_l;
-
-    std::vector<lm_ggml_context_ptr>        ctxs;
-    std::vector<lm_ggml_backend_buffer_ptr> bufs;
-
-    size_t total_size() const {
-        size_t size = 0;
-        for (const auto & buf : bufs) {
-            size += lm_ggml_backend_buffer_get_size(buf.get());
-        }
-
-        return size;
-    }
-
-    // TODO: better data structures to reduce the cost of this operation
-    llama_pos max_pos() const {
-        llama_pos max_pos = -1;
-        for (const auto & cell : cells) {
-            max_pos = std::max(max_pos, cell.pos);
-        }
-
-        return max_pos;
-    }
-};
-
-// a structure holds information about the slot found in llama_kv_cache_find_slot
-struct llama_kv_cache_slot_info {
-    std::pair<uint32_t, uint32_t> boundaries; // slot boundaries [begin, end)
-    bool found = false;                       // the slot was found
-
-    explicit llama_kv_cache_slot_info(bool found_) : found{found_} {}
-    llama_kv_cache_slot_info(uint32_t begin, uint32_t end) : boundaries{begin, end}, found{true} {}
-
-    operator bool() const { return found; }
-};
-
-// TODO: maybe not needed
-uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams);
-
-bool llama_kv_cache_init(
-        struct llama_kv_cache & cache,
-        const llama_model & model,
-        const llama_cparams & cparams,
-        lm_ggml_type type_k,
-        lm_ggml_type type_v,
-        uint32_t kv_size,
-        bool offload);
-
-// find an empty slot of size "n_tokens" in the cache
-// updates the cache head
-// returns a structure holding information about the slot found
-// Note: On success, it's important that cache.head points
-// to the first cell of the slot.
-struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
-        struct llama_kv_cache & cache,
-        const struct llama_ubatch & batch);
-
-// find how many cells are currently in use
-uint32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache);
-
-void llama_kv_cache_clear(struct llama_kv_cache & cache);
-
-bool llama_kv_cache_seq_rm(
-        struct llama_kv_cache & cache,
-        llama_seq_id seq_id,
-        llama_pos p0,
-        llama_pos p1);
-
-void llama_kv_cache_seq_cp(
-        struct llama_kv_cache & cache,
-        llama_seq_id seq_id_src,
-        llama_seq_id seq_id_dst,
-        llama_pos p0,
-        llama_pos p1);
-
-void llama_kv_cache_seq_keep(
-        struct llama_kv_cache & cache,
-        llama_seq_id seq_id);
-
-void llama_kv_cache_seq_add(
-        struct llama_kv_cache & cache,
-        llama_seq_id seq_id,
-        llama_pos p0,
-        llama_pos p1,
-        llama_pos delta);
-
-void llama_kv_cache_seq_div(
-        struct llama_kv_cache & cache,
-        llama_seq_id seq_id,
-        llama_pos p0,
-        llama_pos p1,
-        int d);
-
-llama_pos llama_kv_cache_seq_pos_max(
-        struct llama_kv_cache & cache,
-        llama_seq_id seq_id);
-
-void llama_kv_cache_defrag(struct llama_kv_cache & cache);
-
-int32_t llama_get_kv_cache_token_count(const struct llama_kv_cache & kv);
-
-int32_t llama_get_kv_cache_used_cells(const struct llama_kv_cache & kv);
-
-bool llama_kv_cache_can_shift(const struct llama_kv_cache & kv);
-
-//
-// kv cache view
-//
-
-struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_kv_cache & kv, int32_t n_seq_max);
-
-void llama_kv_cache_view_update(struct llama_kv_cache_view * view, const struct llama_kv_cache & kv);
-
-//
-// kv cache restore
-//
-
-// saves the kv_cache state for future recovery.
-// used to rollback llama_kv_cache_find_slot changes.
-struct llama_kv_slot_restorer {
-    struct llama_kv_cache_state {
-        uint32_t head = 0;
-        uint32_t n    = 0;
-    } old_state;
-
-    // for non-recurrent models only
-    // list of slots to restore
-    std::vector<std::pair<uint32_t, uint32_t>> slot_boundaries;
-
-    bool do_restore = false;
-
-    explicit llama_kv_slot_restorer(const struct llama_kv_cache & cache) {
-        old_state.head = cache.head;
-        old_state.n    = cache.n;
-    }
-
-    // saves a slot information for future restoration
-    void save(const struct llama_kv_cache_slot_info & slot) {
-        if (slot) {
-            do_restore = true;
-            if (slot.boundaries.first != slot.boundaries.second) {
-                slot_boundaries.push_back(slot.boundaries);
-            }
-        }
-    }
-
-    // must be explicitly called to restore the kv_cache state
-    // and rollback changes from all llama_kv_cache_find_slot calls
-    void restore(struct llama_kv_cache & cache) {
-        if (do_restore) {
-            cache.head = old_state.head;
-            cache.n    = old_state.n;
-
-            if (cache.recurrent) { // recurrent models like Mamba or RWKV can't have a state partially erased
-                llama_kv_cache_seq_rm(cache, -1, -1, -1);
-            } else {
-                for (auto & slot : slot_boundaries) {
-                    llama_kv_cache_seq_rm(cache, -1, slot.first, slot.second);
-                }
-            }
-        }
-    }
-};
-
+#pragma once
+
+#include "llama.h"
+#include "llama-io.h"
+#include "llama-memory.h"
+
+#include "ggml-cpp.h"
+
+#include <functional>
+#include <set>
+#include <vector>
+
+struct llama_cparams;
+struct llama_hparams;
+struct llama_ubatch;
+
+struct llama_kv_cache : public llama_memory_i {
+    using llama_memory_i::llama_memory_i;
+
+    virtual void restore() = 0; // call if batch processing fails - restores the cache state
+    virtual void commit()  = 0; // call after successful batch processing - clears any pending state
+
+    virtual int32_t get_n_tokens()   const = 0;
+    virtual int32_t get_used_cells() const = 0; // TODO: remove, this is too-specific to the unified cache
+
+    virtual bool get_can_shift() const = 0;
+
+    bool get_can_edit() const override { return get_can_shift(); }
+};
+
+struct llama_kv_cache_guard {
+    llama_kv_cache_guard(llama_kv_cache * kv) : kv(kv) {}
+
+    ~llama_kv_cache_guard() {
+        kv->restore();
+    }
+
+    void commit() {
+        kv->commit();
+    }
+
+private:
+    llama_kv_cache * kv;
+};
+
+struct llama_kv_cell {
+    llama_pos pos   = -1;
+    llama_pos delta =  0;
+    int32_t   src   = -1; // used by recurrent state models to copy states
+    int32_t   tail  = -1;
+
+    std::set<llama_seq_id> seq_id;
+
+    bool has_seq_id(const llama_seq_id & id) const {
+        return seq_id.find(id) != seq_id.end();
+    }
+
+    bool is_empty() const {
+        return seq_id.empty();
+    }
+
+    bool is_same_seq(const llama_kv_cell & other) const {
+        return seq_id == other.seq_id;
+    }
+};
+
+// ring-buffer of cached KV data
+// TODO: pimpl
+// TODO: add notion of max sequences
+class llama_kv_cache_unified : public llama_kv_cache {
+public:
+    // can be used to query data from the model if needed
+    struct callbacks {
+        std::function<lm_ggml_tensor * (uint32_t n_ctx_per_seq, int il)> get_rope_factors;
+    };
+
+    llama_kv_cache_unified(
+            const llama_hparams & hparams,
+            callbacks cbs);
+
+    virtual ~llama_kv_cache_unified() = default;
+
+    // TODO: become constructor
+    bool init(
+            const llama_model & model, // TODO: do not reference the model
+            const llama_cparams & cparams,
+            lm_ggml_type type_k,
+            lm_ggml_type type_v,
+            uint32_t kv_size,
+            bool offload);
+
+    int32_t get_n_tokens()   const override;
+    int32_t get_used_cells() const override;
+
+    size_t total_size() const;
+
+    // TODO: better data structures to reduce the cost of this operation
+    llama_pos pos_max() const;
+
+    void clear() override;
+    void defrag() override;
+
+    virtual void restore() override;
+    virtual void commit()  override;
+
+    bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    bool get_can_shift() const override;
+
+    // find an empty slot of size "n_tokens" in the cache
+    // updates the cache head
+    // Note: On success, it's important that cache.head points
+    // to the first cell of the slot.
+    bool find_slot(const llama_ubatch & batch);
+
+    // TODO: maybe not needed
+    uint32_t get_padding(const llama_cparams & cparams) const;
+
+    // find how many cells are currently in use
+    uint32_t cell_max() const;
+
+    size_t size_k_bytes() const;
+    size_t size_v_bytes() const;
+
+    // defrag
+
+    struct {
+        std::vector<uint32_t> ids;
+    } defrag_info;
+
+    // return true if cells have been moved
+    bool defrag_prepare(int32_t n_max_nodes);
+
+    // commit/restore cache
+
+    struct slot_range {
+        uint32_t c0 = 0; // note: these are cell indices, not sequence positions
+        uint32_t c1 = 0;
+    };
+
+    // pending cell updates that are not yet committed
+    struct {
+        std::vector<slot_range> ranges;
+    } pending;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const;
+    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1);
+
+    // members
+
+    const llama_hparams & hparams;
+
+    callbacks cbs;
+
+    bool has_shift = false;
+    bool do_defrag = false;
+
+    // TODO: remove this and implement llama_kv_cache_recurrent instead
+    bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token
+
+    bool v_trans   = true;  // the value tensor is transposed
+    bool can_shift = false;
+
+    // Note: The value of head isn't only used to optimize searching
+    // for a free KV slot. llama_decode_impl also uses it, so it
+    // cannot be freely changed after a slot has been allocated.
+    uint32_t head = 0;
+    uint32_t size = 0;
+    uint32_t used = 0; // used cells (i.e. at least one seq_id)
+
+    // computed before each graph build
+    uint32_t n = 0;
+
+    std::vector<llama_kv_cell> cells;
+
+    std::vector<lm_ggml_tensor *> k_l; // per layer
+    std::vector<lm_ggml_tensor *> v_l;
+
+private:
+    lm_ggml_type type_k = LM_GGML_TYPE_F16;
+    lm_ggml_type type_v = LM_GGML_TYPE_F16;
+
+    std::vector<lm_ggml_context_ptr>        ctxs;
+    std::vector<lm_ggml_backend_buffer_ptr> bufs;
+
+    void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
+    void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
+
+    bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
+    bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
+};
+
+// TODO: temporary reusing llama_kv_cache_unified -- implement recurrent cache and simplify llama_kv_cache_unified
+//class llama_kv_cache_recurrent : public llama_kv_cache_unified {
+//public:
+//    using llama_kv_cache_unified::llama_kv_cache_unified;
+//};
+
+//
+// kv cache view
+//
+
+llama_kv_cache_view llama_kv_cache_view_init(const llama_kv_cache & kv, int32_t n_seq_max);
+
+void llama_kv_cache_view_update(llama_kv_cache_view * view, const llama_kv_cache * kv);
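
The new commit/restore protocol above is built around RAII: the destructor of llama_kv_cache_guard always calls kv->restore(), while commit() clears the pending slot ranges first, so a restore after a committed batch is a no-op. A minimal usage sketch, based only on the comments in this header; decode_sketch and process_ubatch are hypothetical stand-ins for the real caller (presumably the decode path in llama-context.cpp), not code from this package:

    // Sketch only: llama_kv_cache, llama_kv_cache_guard and llama_ubatch come
    // from the header above; process_ubatch is a placeholder for the real
    // find_slot + graph-compute path.
    static int decode_sketch(llama_kv_cache * kv, const llama_ubatch & ubatch) {
        llama_kv_cache_guard guard(kv); // destructor calls kv->restore()

        if (!process_ubatch(kv, ubatch)) {
            // early exit: the guard's restore() rolls back pending slot ranges
            return 1;
        }

        // success: commit() clears the pending state, so the destructor's
        // restore() becomes a no-op
        guard.commit();
        return 0;
    }
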
package/cpp/llama-memory.cpp
@@ -0,0 +1 @@
+#include "llama-memory.h"
package/cpp/llama-memory.h
@@ -0,0 +1,21 @@
+#pragma once
+
+#include "llama.h"
+
+// general concept of LLM memory
+// the KV cache is a type of LLM memory, but there can be other types
+class llama_memory_i {
+public:
+    virtual void clear() = 0;
+    virtual void defrag() = 0;
+
+    virtual bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0;
+    virtual void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0;
+    virtual void seq_keep(llama_seq_id seq_id) = 0;
+    virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) = 0;
+    virtual void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) = 0;
+
+    virtual llama_pos seq_pos_max(llama_seq_id seq_id) const = 0;
+
+    virtual bool get_can_edit() const = 0;
+};
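
Taken together, llama_memory_i is the general memory abstraction (clearing, defragging, and per-sequence edits), and the llama_kv_cache interface in llama-kv-cache.h extends it with the commit/restore protocol and shift queries. A stub implementation, shown only to make the required virtual surface concrete; the null_memory class is illustrative and not part of this package:

    // Illustrative stub: a memory type that stores nothing and permits no edits.
    class null_memory : public llama_memory_i {
    public:
        void clear()  override {}
        void defrag() override {}

        bool seq_rm  (llama_seq_id, llama_pos, llama_pos) override { return false; }
        void seq_cp  (llama_seq_id, llama_seq_id, llama_pos, llama_pos) override {}
        void seq_keep(llama_seq_id) override {}
        void seq_add (llama_seq_id, llama_pos, llama_pos, llama_pos) override {}
        void seq_div (llama_seq_id, llama_pos, llama_pos, int) override {}

        llama_pos seq_pos_max(llama_seq_id) const override { return -1; }

        bool get_can_edit() const override { return false; }
    };
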