cui-llama.rn 1.6.0 → 1.6.1

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between the two published versions.
Files changed (195)
  1. package/README.md +35 -7
  2. package/android/src/main/CMakeLists.txt +16 -11
  3. package/android/src/main/java/com/rnllama/LlamaContext.java +4 -1
  4. package/android/src/main/jni.cpp +20 -4
  5. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  9. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  10. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  11. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  12. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  13. package/cpp/LICENSE +21 -0
  14. package/cpp/chat.cpp +1 -1
  15. package/cpp/common.cpp +17 -2
  16. package/cpp/common.h +7 -3
  17. package/cpp/ggml-alloc.c +4 -1
  18. package/cpp/ggml-cpp.h +1 -1
  19. package/cpp/ggml-cpu/amx/amx.cpp +221 -0
  20. package/cpp/ggml-cpu/amx/amx.h +8 -0
  21. package/cpp/ggml-cpu/amx/common.h +91 -0
  22. package/cpp/ggml-cpu/amx/mmq.cpp +2511 -0
  23. package/cpp/ggml-cpu/amx/mmq.h +10 -0
  24. package/{ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers → cpp/ggml-cpu}/binary-ops.h +1 -1
  25. package/cpp/ggml-cpu/common.h +72 -0
  26. package/cpp/{ggml-cpu-aarch64.cpp → ggml-cpu/ggml-cpu-aarch64.cpp} +809 -101
  27. package/cpp/{ggml-cpu.c → ggml-cpu/ggml-cpu.c} +109 -42
  28. package/cpp/{ggml-cpu.cpp → ggml-cpu/ggml-cpu.cpp} +3 -0
  29. package/cpp/{ops.cpp → ggml-cpu/ops.cpp} +246 -160
  30. package/{ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers → cpp/ggml-cpu}/ops.h +2 -20
  31. package/cpp/{sgemm.cpp → ggml-cpu/sgemm.cpp} +501 -0
  32. package/{ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers → cpp/ggml-cpu}/simd-mappings.h +7 -3
  33. package/{ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers → cpp/ggml-cpu}/unary-ops.h +1 -1
  34. package/cpp/ggml-cpu.h +5 -0
  35. package/cpp/ggml-impl.h +16 -9
  36. package/cpp/ggml-llama-sim.metallib +0 -0
  37. package/cpp/ggml-llama.metallib +0 -0
  38. package/cpp/ggml-metal.m +492 -47
  39. package/cpp/ggml.c +134 -244
  40. package/cpp/ggml.h +61 -94
  41. package/cpp/json-schema-to-grammar.cpp +3 -0
  42. package/cpp/llama-arch.cpp +46 -17
  43. package/cpp/llama-arch.h +9 -0
  44. package/cpp/llama-batch.cpp +5 -1
  45. package/cpp/llama-batch.h +2 -1
  46. package/cpp/llama-chat.cpp +31 -10
  47. package/cpp/llama-chat.h +3 -2
  48. package/cpp/llama-context.cpp +104 -489
  49. package/cpp/llama-context.h +14 -30
  50. package/cpp/llama-graph.cpp +69 -62
  51. package/cpp/llama-graph.h +21 -18
  52. package/cpp/llama-hparams.h +5 -0
  53. package/cpp/llama-kv-cache.cpp +1497 -391
  54. package/cpp/llama-kv-cache.h +272 -80
  55. package/cpp/llama-memory.h +11 -1
  56. package/cpp/llama-model.cpp +502 -176
  57. package/cpp/llama-model.h +13 -3
  58. package/cpp/llama-sampling.cpp +2 -1
  59. package/cpp/llama-vocab.cpp +8 -1
  60. package/cpp/llama.h +14 -11
  61. package/cpp/rn-llama.cpp +20 -172
  62. package/cpp/rn-llama.h +1 -5
  63. package/ios/CMakeLists.txt +13 -10
  64. package/ios/RNLlama.h +6 -0
  65. package/ios/RNLlama.mm +5 -0
  66. package/ios/RNLlamaContext.mm +26 -28
  67. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +7 -3
  68. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpp.h +1 -1
  69. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu.h +5 -0
  70. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-impl.h +16 -9
  71. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +61 -94
  72. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-arch.h +9 -0
  73. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-batch.h +2 -1
  74. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +3 -2
  75. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +14 -30
  76. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +21 -18
  77. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +5 -0
  78. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +272 -80
  79. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +11 -1
  80. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +13 -3
  81. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +14 -11
  82. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +1 -5
  83. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  84. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
  85. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +7 -3
  86. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +1 -1
  87. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +5 -0
  88. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +16 -9
  89. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +61 -94
  90. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +9 -0
  91. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +2 -1
  92. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +3 -2
  93. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +14 -30
  94. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +21 -18
  95. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +5 -0
  96. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +272 -80
  97. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +11 -1
  98. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +13 -3
  99. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +14 -11
  100. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +1 -5
  101. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  102. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  103. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +7 -3
  104. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpp.h +1 -1
  105. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu.h +5 -0
  106. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-impl.h +16 -9
  107. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +61 -94
  108. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-arch.h +9 -0
  109. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-batch.h +2 -1
  110. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +3 -2
  111. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +14 -30
  112. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +21 -18
  113. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +5 -0
  114. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +272 -80
  115. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +11 -1
  116. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +13 -3
  117. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +14 -11
  118. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +1 -5
  119. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  120. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
  121. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +7 -3
  122. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +1 -1
  123. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +5 -0
  124. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +16 -9
  125. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +61 -94
  126. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +9 -0
  127. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +2 -1
  128. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +3 -2
  129. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +14 -30
  130. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +21 -18
  131. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +5 -0
  132. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +272 -80
  133. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +11 -1
  134. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +13 -3
  135. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +14 -11
  136. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +1 -5
  137. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  138. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  139. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  140. package/lib/module/NativeRNLlama.js.map +1 -1
  141. package/lib/typescript/NativeRNLlama.d.ts +4 -0
  142. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  143. package/package.json +1 -1
  144. package/src/NativeRNLlama.ts +5 -0
  145. package/cpp/binary-ops.h +0 -16
  146. package/cpp/ops.h +0 -128
  147. package/cpp/simd-mappings.h +0 -888
  148. package/cpp/unary-ops.h +0 -28
  149. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/binary-ops.h +0 -16
  150. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
  151. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
  152. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
  153. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
  154. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ops.h +0 -128
  155. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sgemm.h +0 -14
  156. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/simd-mappings.h +0 -888
  157. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/vec.h +0 -802
  158. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
  159. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
  160. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
  161. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
  162. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +0 -14
  163. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +0 -28
  164. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +0 -802
  165. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/binary-ops.h +0 -16
  166. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
  167. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
  168. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
  169. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
  170. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ops.h +0 -128
  171. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sgemm.h +0 -14
  172. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/simd-mappings.h +0 -888
  173. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unary-ops.h +0 -28
  174. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/vec.h +0 -802
  175. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/binary-ops.h +0 -16
  176. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
  177. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
  178. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
  179. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
  180. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ops.h +0 -128
  181. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +0 -14
  182. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/simd-mappings.h +0 -888
  183. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +0 -28
  184. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +0 -802
  185. /package/cpp/{binary-ops.cpp → ggml-cpu/binary-ops.cpp} +0 -0
  186. /package/cpp/{ggml-cpu-aarch64.h → ggml-cpu/ggml-cpu-aarch64.h} +0 -0
  187. /package/cpp/{ggml-cpu-impl.h → ggml-cpu/ggml-cpu-impl.h} +0 -0
  188. /package/cpp/{ggml-cpu-quants.c → ggml-cpu/ggml-cpu-quants.c} +0 -0
  189. /package/cpp/{ggml-cpu-quants.h → ggml-cpu/ggml-cpu-quants.h} +0 -0
  190. /package/cpp/{ggml-cpu-traits.cpp → ggml-cpu/ggml-cpu-traits.cpp} +0 -0
  191. /package/cpp/{ggml-cpu-traits.h → ggml-cpu/ggml-cpu-traits.h} +0 -0
  192. /package/cpp/{sgemm.h → ggml-cpu/sgemm.h} +0 -0
  193. /package/cpp/{unary-ops.cpp → ggml-cpu/unary-ops.cpp} +0 -0
  194. /package/cpp/{vec.cpp → ggml-cpu/vec.cpp} +0 -0
  195. /package/cpp/{vec.h → ggml-cpu/vec.h} +0 -0
@@ -4,33 +4,41 @@
 #include "llama-batch.h"
 #include "llama-cparams.h"
 #include "llama-model.h"
+#include "llama-context.h"

 #include <algorithm>
 #include <cassert>
+#include <cmath>
 #include <limits>
 #include <map>
 #include <stdexcept>

-llama_kv_cache_unified::llama_kv_cache_unified(const llama_hparams & hparams, callbacks cbs) : hparams(hparams), cbs(std::move(cbs)) {
+//
+// llama_kv_cache_unified
+//
+
+uint32_t llama_kv_cache_unified::get_padding(const llama_cparams & cparams) {
+    // the FA kernels require padding to avoid extra runtime boundary checks
+    return cparams.flash_attn ? 256u : 32u;
 }

-bool llama_kv_cache_unified::init(
+llama_kv_cache_unified::llama_kv_cache_unified(
         const llama_model & model,
-        const llama_cparams & cparams,
         lm_ggml_type type_k,
         lm_ggml_type type_v,
+        bool v_trans,
+        bool offload,
         uint32_t kv_size,
-        bool offload) {
+        uint32_t padding) : model(model), hparams(model.hparams), v_trans(v_trans), padding(padding) {
     const int32_t n_layer = hparams.n_layer;

     has_shift = false;
+    can_shift = true;

-    recurrent = llama_model_is_recurrent(&model);
-    v_trans = !recurrent && !cparams.flash_attn;
-    can_shift = !recurrent && model.arch != LLM_ARCH_DEEPSEEK2; // not supported due to MLA
+    LLAMA_LOG_INFO("%s: kv_size = %d, type_k = '%s', type_v = '%s', n_layer = %d, can_shift = %d, padding = %d\n",
+            __func__, kv_size, lm_ggml_type_name(type_k), lm_ggml_type_name(type_v), n_layer, can_shift, padding);

-    LLAMA_LOG_INFO("%s: kv_size = %d, offload = %d, type_k = '%s', type_v = '%s', n_layer = %d, can_shift = %d\n",
-            __func__, kv_size, offload, lm_ggml_type_name(type_k), lm_ggml_type_name(type_v), n_layer, can_shift);
+    LM_GGML_ASSERT(kv_size % padding == 0 && "kv_size must be a multiple of padding");

     head = 0;
     size = kv_size;
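The new get_padding() contract above is load-bearing: the constructor now asserts that kv_size is a multiple of the padding (256 cells with flash attention, 32 otherwise). A minimal standalone sketch of how a caller could round a requested KV size up to satisfy that assertion (pad_kv_size is an illustrative helper, not part of this package):

    #include <cstdint>

    // Illustrative helper (not from the package): mirrors get_padding() and rounds
    // a requested KV size up so that "kv_size % padding == 0" holds.
    static uint32_t pad_kv_size(uint32_t requested, bool flash_attn) {
        const uint32_t padding = flash_attn ? 256u : 32u;
        return ((requested + padding - 1) / padding) * padding;
    }

    // e.g. pad_kv_size(4097, /*flash_attn=*/true) == 4352, pad_kv_size(4097, false) == 4128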
@@ -76,23 +84,20 @@ bool llama_kv_cache_unified::init(

         const char * dev_name = "CPU";

-        lm_ggml_backend_buffer_type_t buft;
+        lm_ggml_backend_buffer_type_t buft = lm_ggml_backend_cpu_buffer_type();
+
         if (offload) {
             auto * dev = model.dev_layer(i);
             buft = lm_ggml_backend_dev_buffer_type(dev);

             dev_name = lm_ggml_backend_dev_name(dev);
-        } else {
-            buft = lm_ggml_backend_cpu_buffer_type();
         }

-        LLAMA_LOG_DEBUG("%s: layer %3d: n_embd_k_gqa = %d, n_embd_v_gqa = %d, dev = %s\n", __func__,
-                i, n_embd_k_gqa, n_embd_v_gqa, dev_name);
+        LLAMA_LOG_DEBUG("%s: layer %3d: dev = %s\n", __func__, i, dev_name);

         lm_ggml_context * ctx = ctx_for_buft(buft);
         if (!ctx) {
-            LLAMA_LOG_ERROR("%s: failed to create ggml context for kv cache\n", __func__);
-            return false;
+            throw std::runtime_error("failed to create ggml context for kv cache");
         }

         lm_ggml_tensor * k = lm_ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size);
@@ -110,55 +115,28 @@ bool llama_kv_cache_unified::init(

        lm_ggml_backend_buffer_t buf = lm_ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
        if (!buf) {
-            LLAMA_LOG_ERROR("%s: failed to allocate buffer for kv cache\n", __func__);
-            return false;
+            throw std::runtime_error("failed to allocate buffer for kv cache");
        }
        lm_ggml_backend_buffer_clear(buf, 0);
        LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, lm_ggml_backend_buffer_name(buf), lm_ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
        bufs.emplace_back(buf);
    }

-    return true;
-}
-
-int32_t llama_kv_cache_unified::get_n_tokens() const {
-    int32_t result = 0;
-
-    for (uint32_t i = 0; i < size; i++) {
-        result += cells[i].seq_id.size();
-    }
-
-    return result;
-}
-
-int32_t llama_kv_cache_unified::get_used_cells() const {
-    return used;
-}
-
-size_t llama_kv_cache_unified::total_size() const {
-    size_t size = 0;
-    for (const auto & buf : bufs) {
-        size += lm_ggml_backend_buffer_get_size(buf.get());
-    }
-
-    return size;
-}
+    {
+        const size_t memory_size_k = size_k_bytes();
+        const size_t memory_size_v = size_v_bytes();

-llama_pos llama_kv_cache_unified::pos_max() const {
-    llama_pos pos_max = -1;
-    for (const auto & cell : cells) {
-        pos_max = std::max(pos_max, cell.pos);
+        LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
+                (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
+                lm_ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
+                lm_ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
    }
-
-    return pos_max;
 }

 void llama_kv_cache_unified::clear() {
    for (int32_t i = 0; i < (int32_t) size; ++i) {
        cells[i].pos = -1;
        cells[i].seq_id.clear();
-        cells[i].src = -1;
-        cells[i].tail = -1;
    }
    head = 0;
    used = 0;
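With init() folded into the constructor, setup failures now surface as exceptions instead of a bool return value. A hedged sketch of what a call site could look like (the unique_ptr holder and the surrounding variables are assumptions for illustration, not code from this package):

    // Hypothetical call site: construction either succeeds or throws.
    std::unique_ptr<llama_kv_cache_unified> kv;
    try {
        kv = std::make_unique<llama_kv_cache_unified>(
                model, type_k, type_v, v_trans, offload, kv_size, padding);
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: failed to create KV cache: %s\n", __func__, err.what());
        return nullptr;
    }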
@@ -179,35 +157,6 @@ bool llama_kv_cache_unified::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos
        p1 = std::numeric_limits<llama_pos>::max();
    }

-    // models like Mamba or RWKV can't have a state partially erased
-    if (recurrent) {
-        if (seq_id >= (int64_t) size) {
-            // could be fatal
-            return false;
-        }
-        if (0 <= seq_id) {
-            int32_t & tail_id = cells[seq_id].tail;
-            if (tail_id >= 0) {
-                const llama_kv_cell & cell = cells[tail_id];
-                // partial intersection is invalid
-                if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) {
-                    return false;
-                }
-                // invalidate tails which will be cleared
-                if (p0 <= cell.pos && cell.pos < p1) {
-                    tail_id = -1;
-                }
-            }
-        } else {
-            // seq_id is negative, then the range should include everything or nothing
-            if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits<llama_pos>::max())) {
-                return false;
-            }
-        }
-
-        return true;
-    }
-
    for (uint32_t i = 0; i < size; ++i) {
        if (cells[i].pos >= p0 && cells[i].pos < p1) {
            if (seq_id < 0) {
@@ -224,7 +173,6 @@ bool llama_kv_cache_unified::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos
            }

            cells[i].pos = -1;
-            cells[i].src = -1;

            if (new_head == size) {
                new_head = i;
@@ -254,34 +202,6 @@ void llama_kv_cache_unified::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id
        p1 = std::numeric_limits<llama_pos>::max();
    }

-    if (recurrent) {
-        if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) {
-            llama_kv_cell & tail_src = cells[seq_id_src];
-            llama_kv_cell & tail_dst = cells[seq_id_dst];
-            if (tail_dst.tail >= 0) {
-                // clear destination seq_id if it wasn't empty
-                llama_kv_cell & cell_dst = cells[tail_dst.tail];
-
-                cell_dst.seq_id.erase(seq_id_dst);
-                tail_dst.tail = -1;
-                if (cell_dst.seq_id.empty()) {
-                    cell_dst.pos = -1;
-                    cell_dst.delta = -1;
-                    cell_dst.src = -1;
-                    used -= 1;
-                }
-            }
-            if (tail_src.tail >= 0) {
-                llama_kv_cell & cell_src = cells[tail_src.tail];
-
-                cell_src.seq_id.insert(seq_id_dst);
-                tail_dst.tail = tail_src.tail;
-            }
-        }
-
-        return;
-    }
-
    // otherwise, this is the KV of a Transformer-like model
    head = 0;

@@ -296,17 +216,12 @@ void llama_kv_cache_unified::seq_keep(llama_seq_id seq_id) {
    uint32_t new_head = size;

    for (uint32_t i = 0; i < size; ++i) {
-        if (recurrent && (llama_seq_id) i != seq_id) {
-            cells[i].tail = -1;
-        }
-
        if (!cells[i].has_seq_id(seq_id)) {
            if (cells[i].pos >= 0) {
                used--;
            }

            cells[i].pos = -1;
-            cells[i].src = -1;
            cells[i].seq_id.clear();

            if (new_head == size){
@@ -344,20 +259,6 @@ void llama_kv_cache_unified::seq_add(llama_seq_id seq_id, llama_pos p0, llama_po
        return;
    }

-    if (recurrent) {
-        // for Mamba-like or RWKV models, only the pos needs to be shifted
-        if (0 <= seq_id && seq_id < (int64_t) size) {
-            const int32_t tail_id = cells[seq_id].tail;
-            if (tail_id >= 0) {
-                llama_kv_cell & cell = cells[tail_id];
-                if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
-                    cell.pos += delta;
-                }
-            }
-        }
-        return;
-    }
-
    for (uint32_t i = 0; i < size; ++i) {
        if (cells[i].has_seq_id(seq_id) && cells[i].pos >= p0 && cells[i].pos < p1) {
            has_shift = true;
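In the non-recurrent path that remains, shifting a range only updates the affected cells and marks has_shift; the actual K-shift is applied later in one pass by update(), which reads each cell's delta through the k_shift input further down in this diff. A simplified standalone sketch of that bookkeeping (the cell struct and function here are stand-ins, not the package's kv_cell):

    #include <vector>

    struct cell { int pos = -1; int delta = 0; };

    // Sketch only: record the shift per cell now, apply the RoPE K-shift later.
    // The real code additionally filters cells by sequence id via has_seq_id().
    static bool shift_range(std::vector<cell> & cells, int p0, int p1, int delta) {
        bool has_shift = false;
        for (auto & c : cells) {
            if (c.pos >= p0 && c.pos < p1) {
                has_shift = true;
                c.pos   += delta;
                c.delta += delta;
            }
        }
        return has_shift;
    }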
@@ -400,21 +301,6 @@ void llama_kv_cache_unified::seq_div(llama_seq_id seq_id, llama_pos p0, llama_po
        return;
    }

-    if (recurrent) {
-        // for Mamba-like or RWKV models, only the pos needs to be changed
-        if (0 <= seq_id && seq_id < (int64_t) size) {
-            const int32_t tail_id = cells[seq_id].tail;
-            if (tail_id >= 0) {
-                llama_kv_cell & cell = cells[tail_id];
-                if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
-                    cell.pos /= d;
-                }
-            }
-        }
-
-        return;
-    }
-
    for (uint32_t i = 0; i < size; ++i) {
        if (cells[i].has_seq_id(seq_id) && cells[i].pos >= p0 && cells[i].pos < p1) {
            has_shift = true;
@@ -440,23 +326,11 @@ llama_pos llama_kv_cache_unified::seq_pos_max(llama_seq_id seq_id) const {
    return result;
 }

-void llama_kv_cache_unified::defrag() {
-    if (!recurrent) {
-        do_defrag = true;
-    }
-}
-
 void llama_kv_cache_unified::restore() {
    if (pending.ranges.empty()) {
        return;
    }

-    // TODO: tmp - move to llama_kv_cache_recurrent
-    if (recurrent) {
-        seq_rm(-1, -1, -1);
-        return;
-    }
-
    uint32_t new_head = size;

    for (auto & range : pending.ranges) {
@@ -469,7 +343,6 @@ void llama_kv_cache_unified::restore() {
            }

            cells[i].pos = -1;
-            cells[i].src = -1;
        }

        new_head = std::min(new_head, range.c0);
@@ -481,11 +354,6 @@ void llama_kv_cache_unified::restore() {
 }

 void llama_kv_cache_unified::commit() {
-    // TODO: tmp - move to llama_kv_cache_recurrent
-    if (recurrent) {
-        return;
-    }
-
    if (pending.ranges.empty()) {
        LLAMA_LOG_WARN("%s: no pending KV cache updates to commit - might indicate a bug (ref: %s)\n",
            __func__, "https://github.com/ggml-org/llama.cpp/pull/12695");
@@ -495,183 +363,110 @@ void llama_kv_cache_unified::commit() {
495
363
  pending.ranges.clear();
496
364
  }
497
365
 
498
- bool llama_kv_cache_unified::get_can_shift() const {
499
- return can_shift;
500
- }
366
+ bool llama_kv_cache_unified::update(llama_context & lctx) {
367
+ bool need_reserve = false;
501
368
 
502
- bool llama_kv_cache_unified::find_slot(
503
- const llama_ubatch & ubatch) {
504
- const uint32_t n_tokens = ubatch.n_tokens;
505
- const uint32_t n_seqs = ubatch.n_seqs;
506
- const uint32_t n_seq_tokens = ubatch.n_seq_tokens;
369
+ auto * sched = lctx.get_sched();
507
370
 
508
- // if we have enough unused cells before the current head ->
509
- // better to start searching from the beginning of the cache, hoping to fill it
510
- if (head > used + 2*ubatch.n_tokens) {
511
- head = 0;
512
- }
371
+ if (has_shift) {
372
+ if (!get_can_shift()) {
373
+ LM_GGML_ABORT("The current KV cache / model configuration does not support K-shift");
374
+ }
513
375
 
514
- if (recurrent) {
515
- // For recurrent state architectures (like Mamba or RWKV),
516
- // each cache cell can store the state for a whole sequence.
517
- // A slot should be always be contiguous.
376
+ LLAMA_LOG_DEBUG("%s: applying K-shift\n", __func__);
518
377
 
519
- // can only process batches with an equal number of new tokens in each sequence
520
- LM_GGML_ASSERT(ubatch.equal_seqs);
378
+ // apply K-shift if needed
379
+ if (hparams.rope_type != LLAMA_ROPE_TYPE_NONE) {
380
+ lm_ggml_backend_sched_reset(sched);
521
381
 
522
- int32_t min = size - 1;
523
- int32_t max = 0;
382
+ auto * gf = lctx.graph_init();
524
383
 
525
- // everything should fit if all seq_ids are smaller than the max
526
- for (uint32_t s = 0; s < n_seqs; ++s) {
527
- const uint32_t n_seq_id = ubatch.n_seq_id[s];
528
- for (uint32_t j = 0; j < n_seq_id; ++j) {
529
- const llama_seq_id seq_id = ubatch.seq_id[s][j];
384
+ auto res = build_graph_shift(lctx.get_cparams(), lctx.get_ctx_compute(), gf);
530
385
 
531
- if (seq_id < 0 || (uint32_t) seq_id >= size) {
532
- // too big seq_id
533
- // TODO: would it be possible to resize the cache instead?
534
- LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%d Try using a bigger --parallel value\n", __func__, seq_id, size);
535
- return false;
536
- }
537
- if (j > 0) {
538
- llama_kv_cell & seq = cells[seq_id];
539
- if (seq.tail >= 0) {
540
- llama_kv_cell & cell = cells[seq.tail];
541
- // clear cells from seq_ids that become shared
542
- // (should not normally happen, but let's handle it anyway)
543
- cell.seq_id.erase(seq_id);
544
- seq.tail = -1;
545
- if (cell.seq_id.empty()) {
546
- cell.pos = -1;
547
- cell.src = -1;
548
- used -= 1;
549
- }
550
- }
551
- }
552
- }
386
+ lm_ggml_backend_sched_alloc_graph(sched, gf);
387
+
388
+ res->set_inputs(nullptr);
389
+
390
+ lctx.graph_compute(gf, false);
391
+
392
+ need_reserve = true;
553
393
  }
554
394
 
555
- #ifndef NDEBUG
556
395
  {
557
- std::vector<int32_t> tails_verif;
558
- tails_verif.assign(size, -1);
559
- for (uint32_t i = 0; i < size; ++i) {
560
- llama_kv_cell & cell = cells[i];
561
- for (llama_seq_id seq_id : cell.seq_id) {
562
- if (tails_verif[seq_id] != -1) {
563
- LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]);
564
- }
565
- tails_verif[seq_id] = i;
566
- }
567
- }
396
+ has_shift = false;
397
+
568
398
  for (uint32_t i = 0; i < size; ++i) {
569
- if (tails_verif[i] != cells[i].tail) {
570
- LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cells[i].tail, tails_verif[i]);
571
- }
399
+ cells[i].delta = 0;
572
400
  }
573
401
  }
574
- #endif
402
+ }
575
403
 
576
- // find next empty cell
577
- uint32_t next_empty_cell = head;
404
+ if (do_defrag) {
405
+ LLAMA_LOG_DEBUG("%s: defragmenting KV cache\n", __func__);
578
406
 
579
- for (uint32_t i = 0; i < size; ++i) {
580
- if (next_empty_cell >= size) { next_empty_cell -= size; }
581
- llama_kv_cell & cell = cells[next_empty_cell];
582
- if (cell.is_empty()) { break; }
583
- next_empty_cell += 1;
584
- }
407
+ if (defrag_prepare(lctx.graph_max_nodes())) {
408
+ lm_ggml_backend_sched_reset(sched);
585
409
 
586
- // find usable cell range
587
- for (uint32_t s = 0; s < n_seqs; ++s) {
588
- const llama_seq_id seq_id = ubatch.seq_id[s][0];
589
- llama_kv_cell & seq_meta = cells[seq_id];
590
- bool has_cell = false;
591
- if (seq_meta.tail >= 0) {
592
- llama_kv_cell & cell = cells[seq_meta.tail];
593
- LM_GGML_ASSERT(cell.has_seq_id(seq_id));
594
- // does this seq_id "own" the cell?
595
- if (cell.seq_id.size() == 1) { has_cell = true; }
596
- }
597
- if (!has_cell) {
598
- llama_kv_cell & empty_cell = cells[next_empty_cell];
599
- LM_GGML_ASSERT(empty_cell.is_empty());
600
- // copy old tail into the empty cell
601
- if (seq_meta.tail >= 0) {
602
- llama_kv_cell & orig_cell = cells[seq_meta.tail];
603
- empty_cell.pos = orig_cell.pos;
604
- empty_cell.src = orig_cell.src;
605
- orig_cell.seq_id.erase(seq_id);
606
- empty_cell.seq_id.insert(seq_id); // will be overwritten
607
- }
608
- seq_meta.tail = next_empty_cell;
609
- // find next empty cell
610
- if (s + 1 < n_seqs) {
611
- next_empty_cell += 1;
612
- for (uint32_t i = 0; i < size; ++i) {
613
- if (next_empty_cell >= size) { next_empty_cell -= size; }
614
- llama_kv_cell & cell = cells[next_empty_cell];
615
- if (cell.is_empty()) { break; }
616
- next_empty_cell += 1;
617
- }
618
- }
619
- }
620
- if (min > seq_meta.tail) { min = seq_meta.tail; }
621
- if (max < seq_meta.tail) { max = seq_meta.tail; }
622
- }
410
+ auto * gf = lctx.graph_init();
623
411
 
624
- // gather and re-order
625
- for (uint32_t s = 0; s < n_seqs; ++s) {
626
- int32_t dst_id = s + min;
627
- int32_t src_id = cells[ubatch.seq_id[s][0]].tail;
628
- if (dst_id != src_id) {
629
- llama_kv_cell & dst_cell = cells[dst_id];
630
- llama_kv_cell & src_cell = cells[src_id];
412
+ auto res = build_graph_defrag(lctx.get_cparams(), lctx.get_ctx_compute(), gf);
631
413
 
632
- std::swap(dst_cell.pos, src_cell.pos);
633
- std::swap(dst_cell.src, src_cell.src);
634
- std::swap(dst_cell.seq_id, src_cell.seq_id);
414
+ lm_ggml_backend_sched_alloc_graph(sched, gf);
635
415
 
636
- // swap tails (assuming they NEVER overlap)
637
- for (const llama_seq_id seq_id : src_cell.seq_id) {
638
- cells[seq_id].tail = src_id;
639
- }
640
- for (const llama_seq_id seq_id : dst_cell.seq_id) {
641
- cells[seq_id].tail = dst_id;
642
- }
643
- }
644
- }
416
+ res->set_inputs(nullptr);
645
417
 
646
- // update the pos of the used seqs
647
- for (uint32_t s = 0; s < n_seqs; ++s) {
648
- const llama_pos last_pos = ubatch.pos[n_seq_tokens * s + n_seq_tokens - 1];
649
- int32_t cell_id = s + min;
650
- llama_kv_cell & cell = cells[cell_id];
418
+ lctx.graph_compute(gf, false);
651
419
 
652
- if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) {
653
- // What should happen when the pos backtracks or skips a value?
654
- // Clearing the state mid-batch would require special-casing which isn't done.
655
- LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
656
- __func__, last_pos, cell.pos, ubatch.seq_id[s][0], n_seq_tokens);
657
- }
658
- cell.pos = last_pos;
659
- cell.seq_id.clear();
660
- for (int32_t j = 0; j < ubatch.n_seq_id[s]; ++j) {
661
- const llama_seq_id seq_id = ubatch.seq_id[s][j];
662
- cell.seq_id.insert(seq_id);
663
- cells[seq_id].tail = cell_id;
664
- }
420
+ need_reserve = true;
665
421
  }
666
422
 
667
- // allow getting the range of used cells, from head to head + n
668
- head = min;
669
- n = max - min + 1;
670
- used = std::count_if(cells.begin(), cells.end(),
671
- [](const llama_kv_cell& cell){ return !cell.is_empty(); });
423
+ do_defrag = false;
424
+ }
425
+
426
+ return need_reserve;
427
+ }
428
+
429
+ void llama_kv_cache_unified::defrag_sched(float thold) {
430
+ // - do not defrag small contexts (i.e. < 2048 tokens)
431
+ // - count the padding towards the number of used tokens
432
+ const float fragmentation = n >= 2048 ? std::max(0.0f, 1.0f - (float(used + padding)/n)) : 0.0f;
433
+
434
+ // queue defragmentation for next llama_kv_cache_update
435
+ if (fragmentation > thold) {
436
+ LLAMA_LOG_DEBUG("%s: fragmentation: %.2f - requesting defrag\n", __func__, fragmentation);
437
+
438
+ do_defrag = true;
439
+ }
440
+ }
441
+
442
+ void llama_kv_cache_unified::set_full() {
443
+ n = size;
444
+ }
445
+
446
+ llama_sbatch llama_kv_cache_unified::sbatch_init(
447
+ const llama_batch & batch,
448
+ bool logits_all) {
449
+ return llama_sbatch(batch, hparams.n_embd, true, logits_all);
450
+ }
451
+
452
+ llama_ubatch llama_kv_cache_unified::ubatch_next(
453
+ llama_sbatch & sbatch,
454
+ uint32_t n_ubatch,
455
+ bool embd_pooled) const {
456
+ LM_GGML_UNUSED(embd_pooled);
457
+ return sbatch.split_simple(n_ubatch);
458
+ }
459
+
460
+ bool llama_kv_cache_unified::find_slot(
461
+ const llama_ubatch & ubatch) {
462
+ const uint32_t n_tokens = ubatch.n_tokens;
463
+ const uint32_t n_seqs = ubatch.n_seqs;
464
+ const uint32_t n_seq_tokens = ubatch.n_seq_tokens;
672
465
 
673
- // sanity check
674
- return n >= n_seqs;
466
+ // if we have enough unused cells before the current head ->
467
+ // better to start searching from the beginning of the cache, hoping to fill it
468
+ if (head > used + 2*ubatch.n_tokens) {
469
+ head = 0;
675
470
  }
676
471
 
677
472
  // otherwise, one cell per token.
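The new defrag_sched() above replaces the old unconditional defrag() entry point: fragmentation is estimated from the attended window n, the used cell count, and the padding, and defragmentation is only queued when it crosses the caller's threshold. A standalone restatement of that check (the function and variable names here are illustrative):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Same formula as defrag_sched(): small contexts (< 2048 cells) are never
    // defragmented, and the padding counts towards the "used" tokens.
    static bool should_defrag(uint32_t n, uint32_t used, uint32_t padding, float thold) {
        const float fragmentation = n >= 2048 ? std::max(0.0f, 1.0f - float(used + padding)/n) : 0.0f;
        return fragmentation > thold;
    }

    int main() {
        // 4096-cell window, 1024 used cells, 32-cell padding -> fragmentation ~= 0.74
        std::printf("defrag scheduled: %d\n", should_defrag(4096, 1024, 32, 0.1f));
        return 0;
    }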
@@ -725,24 +520,50 @@ bool llama_kv_cache_unified::find_slot(

    pending.ranges.push_back({head, head + n_tokens});

+    // a heuristic, to avoid attending the full cache if it is not yet utilized
+    // after enough generations, the benefit from this heuristic disappears
+    // if we start defragmenting the cache, the benefit from this will be more important
+    n = std::min(size, std::max(padding, LM_GGML_PAD(cell_max(), padding)));
+
+    //printf("n = %5d, used = %5d, head = %5d\n", n, used, head);
+
    return true;
 }

-uint32_t llama_kv_cache_unified::get_padding(const llama_cparams & cparams) const {
-    // the FA kernels require padding to avoid extra runtime boundary checks
-    return cparams.flash_attn ? 256u : 32u;
+int32_t llama_kv_cache_unified::get_n_tokens() const {
+    int32_t result = 0;
+
+    for (uint32_t i = 0; i < size; i++) {
+        result += cells[i].seq_id.size();
+    }
+
+    return result;
 }

-uint32_t llama_kv_cache_unified::cell_max() const {
-    for (uint32_t i = size; i > 0; --i) {
-        const llama_kv_cell & cell = cells[i - 1];
+int32_t llama_kv_cache_unified::get_used_cells() const {
+    return used;
+}

-        if (cell.pos >= 0 && !cell.is_empty()) {
-            return i;
-        }
+bool llama_kv_cache_unified::get_can_shift() const {
+    return can_shift;
+}
+
+llama_pos llama_kv_cache_unified::get_pos_max() const {
+    llama_pos pos_max = -1;
+    for (const auto & cell : cells) {
+        pos_max = std::max(pos_max, cell.pos);
    }

-    return 0;
+    return pos_max;
+}
+
+size_t llama_kv_cache_unified::total_size() const {
+    size_t size = 0;
+    for (const auto & buf : bufs) {
+        size += lm_ggml_backend_buffer_get_size(buf.get());
+    }
+
+    return size;
 }

 size_t llama_kv_cache_unified::size_k_bytes() const {
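find_slot() now also shrinks the attended window: n is capped at the cache size but otherwise rounded up from the highest occupied cell to the padding, so early generations do not attend the full cache. A standalone sketch of that computation (pad_up reproduces what LM_GGML_PAD is expected to do and is an assumption here):

    #include <algorithm>
    #include <cstdint>

    // Round x up to a multiple of n (stand-in for LM_GGML_PAD).
    static uint32_t pad_up(uint32_t x, uint32_t n) {
        return ((x + n - 1) / n) * n;
    }

    // Sketch of the heuristic: attend at least `padding` cells, at most `size`,
    // otherwise just past the highest occupied cell (cell_max), padded.
    static uint32_t kv_window(uint32_t size, uint32_t cell_max, uint32_t padding) {
        return std::min(size, std::max(padding, pad_up(cell_max, padding)));
    }

    // e.g. kv_window(4096, 300, 32) == 320; kv_window(4096, 0, 32) == 32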
@@ -765,68 +586,331 @@ size_t llama_kv_cache_unified::size_v_bytes() const {
765
586
  return size_v_bytes;
766
587
  }
767
588
 
768
- bool llama_kv_cache_unified::defrag_prepare(int32_t n_max_nodes) {
769
- const uint32_t n_layer = hparams.n_layer;
589
+ lm_ggml_tensor * llama_kv_cache_unified::build_rope_shift(
590
+ const llama_cparams & cparams,
591
+ lm_ggml_context * ctx,
592
+ lm_ggml_tensor * cur,
593
+ lm_ggml_tensor * shift,
594
+ lm_ggml_tensor * factors,
595
+ float freq_base,
596
+ float freq_scale) const {
597
+ const auto & n_ctx_orig = cparams.n_ctx_orig_yarn;
770
598
 
771
- const uint32_t n_kv = cell_max();
772
- const uint32_t n_used = used;
599
+ const auto & yarn_ext_factor = cparams.yarn_ext_factor;
600
+ const auto & yarn_beta_fast = cparams.yarn_beta_fast;
601
+ const auto & yarn_beta_slow = cparams.yarn_beta_slow;
773
602
 
774
- assert(n_used <= n_kv);
603
+ const auto & n_rot = hparams.n_rot;
604
+ const auto & rope_type = hparams.rope_type;
775
605
 
776
- //const int64_t t_start = lm_ggml_time_us();
606
+ // See llm_build_deepseek2() for why attn_factor has to be scaled for YaRN RoPE to work correctly.
607
+ // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation.
608
+ const float yarn_attn_factor = model.arch == LLM_ARCH_DEEPSEEK2 ? 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)) : cparams.yarn_attn_factor;
777
609
 
778
- // number of cells moved
779
- uint32_t n_moves = 0;
610
+ lm_ggml_tensor * tmp;
780
611
 
781
- // each move requires 6*n_layer tensors (see graph_build_kv_self_defrag)
782
- // - source view, destination view, copy operation
783
- // - x2 for keys and values
784
- //const uint32_t max_moves = max_nodes()/(6*n_layer);
785
- // TODO: tmp fix https://github.com/ggerganov/llama.cpp/issues/6685#issuecomment-2057579516
786
- const uint32_t max_moves = (n_max_nodes - 2*n_layer)/(6*n_layer);
612
+ if (lm_ggml_is_quantized(cur->type)) {
613
+ // dequantize to f32 -> RoPE -> quantize back
614
+ tmp = lm_ggml_cast(ctx, cur, LM_GGML_TYPE_F32);
787
615
 
788
- // determine which KV cells to move where
789
- //
790
- // cell i moves to ids[i]
791
- //
792
- // if ids[i] == i || ids[i] == n_kv, then cell i is not moved
793
- //
794
- auto & ids = defrag_info.ids;
616
+ tmp = lm_ggml_rope_ext(ctx, tmp,
617
+ shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
618
+ yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);
795
619
 
796
- ids.clear();
797
- ids.resize(n_kv, n_kv);
620
+ tmp = lm_ggml_cpy(ctx, tmp, cur);
621
+ } else {
622
+ // we rotate only the first n_rot dimensions
623
+ tmp = lm_ggml_rope_ext_inplace(ctx, cur,
624
+ shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
625
+ yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);
626
+ }
798
627
 
799
- for (uint32_t i0 = 0; i0 < n_used; ++i0) {
800
- const auto & cell0 = cells[i0];
628
+ return tmp;
629
+ }
801
630
 
802
- if (!cell0.is_empty()) {
803
- ids[i0] = i0;
631
+ class llm_graph_input_k_shift : public llm_graph_input_i {
632
+ public:
633
+ llm_graph_input_k_shift(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {}
634
+ virtual ~llm_graph_input_k_shift() = default;
804
635
 
805
- continue;
806
- }
636
+ void set_input(const llama_ubatch * ubatch) override;
807
637
 
808
- // found a hole - fill it with data from the end of the cache
638
+ lm_ggml_tensor * k_shift; // I32 [kv_size]
809
639
 
810
- uint32_t nh = 1;
640
+ const llama_kv_cache_unified * kv_self;
641
+ };
811
642
 
812
- // determine the size of the hole
813
- while (i0 + nh < n_used && cells[i0 + nh].is_empty()) {
814
- nh++;
643
+ void llm_graph_input_k_shift::set_input(const llama_ubatch * ubatch) {
644
+ LM_GGML_UNUSED(ubatch);
645
+
646
+ if (k_shift) {
647
+ assert(lm_ggml_backend_buffer_is_host(k_shift->buffer));
648
+
649
+ int32_t * data = (int32_t *) k_shift->data;
650
+
651
+ for (uint32_t i = 0; i < kv_self->size; ++i) {
652
+ data[i] = kv_self->cells[i].delta;
815
653
  }
654
+ }
655
+ }
816
656
 
817
- uint32_t nf = 0;
818
- uint32_t is = n_kv - 1;
657
+ llm_graph_result_ptr llama_kv_cache_unified::build_graph_shift(
658
+ const llama_cparams & cparams,
659
+ lm_ggml_context * ctx,
660
+ lm_ggml_cgraph * gf) const {
661
+ auto res = std::make_unique<llm_graph_result>();
819
662
 
820
- // starting from the end, find nh non-empty cells
821
- for (; is > i0; --is) {
822
- const auto & cell1 = cells[is];
663
+ const auto & n_layer = hparams.n_layer;
823
664
 
824
- if (cell1.is_empty() || ids[is] != n_kv) {
825
- continue;
826
- }
665
+ const auto & n_embd_head_k = hparams.n_embd_head_k;
666
+ //const auto & n_embd_head_v = hparams.n_embd_head_v;
827
667
 
828
- // non-empty cell which is not yet moved
829
- nf++;
668
+ const uint32_t n_ctx_per_seq = cparams.n_ctx / cparams.n_seq_max;
669
+
670
+ //LM_GGML_ASSERT(kv_self->size == n_ctx);
671
+
672
+ auto inp = std::make_unique<llm_graph_input_k_shift>(this);
673
+
674
+ inp->k_shift = lm_ggml_new_tensor_1d(ctx, LM_GGML_TYPE_I32, cparams.n_ctx);
675
+ lm_ggml_set_input(inp->k_shift);
676
+
677
+ for (uint32_t il = 0; il < n_layer; ++il) {
678
+ const int64_t n_head_kv = hparams.n_head_kv(il);
679
+ const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
680
+
681
+ const bool is_swa = hparams.is_swa(il);
682
+
683
+ // note: the swa rope params could become part of the cparams in the future
684
+ // if we decide to make them configurable, like the non-sliding ones
685
+ const float freq_base_l = is_swa ? hparams.rope_freq_base_train_swa : cparams.rope_freq_base;
686
+ const float freq_scale_l = is_swa ? hparams.rope_freq_scale_train_swa : cparams.rope_freq_scale;
687
+
688
+ lm_ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il);
689
+
690
+ lm_ggml_tensor * k =
691
+ lm_ggml_view_3d(ctx, k_l[il],
692
+ n_embd_head_k, n_head_kv, size,
693
+ lm_ggml_row_size(k_l[il]->type, n_embd_head_k),
694
+ lm_ggml_row_size(k_l[il]->type, n_embd_k_gqa),
695
+ 0);
696
+
697
+ lm_ggml_tensor * cur = build_rope_shift(cparams, ctx, k, inp->k_shift, rope_factors, freq_base_l, freq_scale_l);
698
+
699
+ lm_ggml_build_forward_expand(gf, cur);
700
+ }
701
+
702
+ res->add_input(std::move(inp));
703
+
704
+ return res;
705
+ }
706
+
707
+ llm_graph_result_ptr llama_kv_cache_unified::build_graph_defrag(
708
+ const llama_cparams & cparams,
709
+ lm_ggml_context * ctx,
710
+ lm_ggml_cgraph * gf) const {
711
+ auto res = std::make_unique<llm_graph_result>();
712
+
713
+ const auto & ids = defrag_info.ids;
714
+
715
+ #if 0
716
+ // CPU defrag
717
+ //
718
+ // TODO: optimizations are possible:
719
+ // - multiple threads
720
+ // - avoid copying to the host memory when already there
721
+ //
722
+ // likely not worth the effort, as we have lm_ggml_graph based defrag
723
+ //
724
+
725
+ const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa();
726
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa();
727
+
728
+ const uint32_t kv_size = size;
729
+
730
+ std::vector<uint8_t> buf_k;
731
+ std::vector<uint8_t> buf_v;
732
+
733
+ for (uint32_t il = 0; il < n_layer; ++il) {
734
+ const size_t k_size_row = lm_ggml_row_size(k_l[il]->type, n_embd_k_gqa);
735
+ const size_t k_size = lm_ggml_row_size(k_l[il]->type, n_embd_k_gqa*kv_size);
736
+
737
+ const size_t v_size_el = lm_ggml_type_size(v_l[il]->type);
738
+ const size_t v_size = lm_ggml_row_size (v_l[il]->type, n_embd_v_gqa*kv_size);
739
+
740
+ buf_k.resize(k_size);
741
+ buf_v.resize(v_size);
742
+
743
+ lm_ggml_backend_tensor_get(k_l[il], buf_k.data(), 0, buf_k.size());
744
+ lm_ggml_backend_tensor_get(v_l[il], buf_v.data(), 0, buf_v.size());
745
+
746
+ // batch move [i, i+nm) to [id, id+nm)
747
+ // note: cells can move only to a lower index
748
+ for (uint32_t i = 0; i < n_kv; ++i) {
749
+ const uint32_t id = ids[i];
750
+
751
+ if (i == id || id == n_kv) {
752
+ continue;
753
+ }
754
+
755
+ uint32_t nm = 1;
756
+
757
+ while (i + nm < n_kv && ids[i + nm] == id + nm) {
758
+ nm++;
759
+ }
760
+
761
+ // move keys
762
+ {
763
+ const int64_t os = i*k_size_row;
764
+ const int64_t od = id*k_size_row;
765
+
766
+ memcpy(buf_k.data() + od, buf_k.data() + os, nm*k_size_row);
767
+ }
768
+
769
+ // move values (note: they are transposed)
770
+ {
771
+ const int64_t os = i;
772
+ const int64_t od = id;
773
+
774
+ for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
775
+ memcpy(buf_v.data() + (od + j*kv_size)*v_size_el, buf_v.data() + (os + j*kv_size)*v_size_el, nm*v_size_el);
776
+ }
777
+ }
778
+
779
+ i += nm - 1;
780
+ }
781
+
782
+ lm_ggml_backend_tensor_set(k_l[il], buf_k.data(), 0, buf_k.size());
783
+ lm_ggml_backend_tensor_set(v_l[il], buf_v.data(), 0, buf_v.size());
784
+ }
785
+ #else
786
+ for (uint32_t i = 0; i < ids.size(); ++i) {
787
+ const uint32_t id = ids[i];
788
+
789
+ if (i == id || id == ids.size()) {
790
+ continue;
791
+ }
792
+
793
+ uint32_t nm = 1;
794
+
795
+ while (i + nm < ids.size() && ids[i + nm] == id + nm) {
796
+ nm++;
797
+ }
798
+
799
+ for (uint32_t il = 0; il < hparams.n_layer; ++il) { // NOLINT
800
+ const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
801
+ const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
802
+
803
+ lm_ggml_tensor * view_k_src = lm_ggml_view_2d(ctx, k_l[il],
804
+ n_embd_k_gqa, nm,
805
+ lm_ggml_row_size(k_l[il]->type, n_embd_k_gqa),
806
+ lm_ggml_row_size(k_l[il]->type, n_embd_k_gqa*i));
807
+
808
+ lm_ggml_tensor * view_k_dst = lm_ggml_view_2d(ctx, k_l[il],
809
+ n_embd_k_gqa, nm,
810
+ lm_ggml_row_size(k_l[il]->type, n_embd_k_gqa),
811
+ lm_ggml_row_size(k_l[il]->type, n_embd_k_gqa*id));
812
+
813
+ lm_ggml_tensor * view_v_src;
814
+ lm_ggml_tensor * view_v_dst;
815
+
816
+ if (cparams.flash_attn) {
817
+ // NOTE: the V cache is not transposed when using flash attention
818
+ view_v_src = lm_ggml_view_2d(ctx, v_l[il],
819
+ n_embd_v_gqa, nm,
820
+ lm_ggml_row_size(v_l[il]->type, n_embd_v_gqa),
821
+ lm_ggml_row_size(v_l[il]->type, n_embd_v_gqa*i));
822
+
823
+ view_v_dst = lm_ggml_view_2d(ctx, v_l[il],
824
+ n_embd_v_gqa, nm,
825
+ lm_ggml_row_size(v_l[il]->type, n_embd_v_gqa),
826
+ lm_ggml_row_size(v_l[il]->type, n_embd_v_gqa*id));
827
+ } else {
828
+ view_v_src = lm_ggml_view_2d(ctx, v_l[il],
829
+ nm, n_embd_v_gqa,
830
+ lm_ggml_row_size(v_l[il]->type, size),
831
+ lm_ggml_row_size(v_l[il]->type, i));
832
+
833
+ view_v_dst = lm_ggml_view_2d(ctx, v_l[il],
834
+ nm, n_embd_v_gqa,
835
+ lm_ggml_row_size(v_l[il]->type, size),
836
+ lm_ggml_row_size(v_l[il]->type, id));
837
+ }
838
+
839
+ lm_ggml_build_forward_expand(gf, lm_ggml_cpy(ctx, view_k_src, view_k_dst));
840
+ lm_ggml_build_forward_expand(gf, lm_ggml_cpy(ctx, view_v_src, view_v_dst));
841
+ }
842
+
843
+ i += nm - 1;
844
+ }
845
+
846
+ //LLAMA_LOG_INFO("gf->n_nodes = %d\n", gf->n_nodes);
847
+ #endif
848
+
849
+ return res;
850
+ }
851
+
852
+ bool llama_kv_cache_unified::defrag_prepare(int32_t n_max_nodes) {
853
+ const uint32_t n_layer = hparams.n_layer;
854
+
855
+ const uint32_t n_kv = cell_max();
856
+ const uint32_t n_used = used;
857
+
858
+ assert(n_used <= n_kv);
859
+
860
+ //const int64_t t_start = lm_ggml_time_us();
861
+
862
+ // number of cells moved
863
+ uint32_t n_moves = 0;
864
+
865
+ // each move requires 6*n_layer tensors (see graph_build_kv_self_defrag)
866
+ // - source view, destination view, copy operation
867
+ // - x2 for keys and values
868
+ //const uint32_t max_moves = max_nodes()/(6*n_layer);
869
+ // TODO: tmp fix https://github.com/ggerganov/llama.cpp/issues/6685#issuecomment-2057579516
870
+ const uint32_t max_moves = (n_max_nodes - 2*n_layer)/(6*n_layer);
871
+
872
+ // determine which KV cells to move where
873
+ //
874
+ // cell i moves to ids[i]
875
+ //
876
+ // if ids[i] == i || ids[i] == n_kv, then cell i is not moved
877
+ //
878
+ auto & ids = defrag_info.ids;
879
+
880
+ ids.clear();
881
+ ids.resize(n_kv, n_kv);
882
+
883
+ for (uint32_t i0 = 0; i0 < n_used; ++i0) {
884
+ const auto & cell0 = cells[i0];
885
+
886
+ if (!cell0.is_empty()) {
887
+ ids[i0] = i0;
888
+
889
+ continue;
890
+ }
891
+
892
+ // found a hole - fill it with data from the end of the cache
893
+
894
+ uint32_t nh = 1;
895
+
896
+ // determine the size of the hole
897
+ while (i0 + nh < n_used && cells[i0 + nh].is_empty()) {
898
+ nh++;
899
+ }
900
+
901
+ uint32_t nf = 0;
902
+ uint32_t is = n_kv - 1;
903
+
904
+ // starting from the end, find nh non-empty cells
905
+ for (; is > i0; --is) {
906
+ const auto & cell1 = cells[is];
907
+
908
+ if (cell1.is_empty() || ids[is] != n_kv) {
909
+ continue;
910
+ }
911
+
912
+ // non-empty cell which is not yet moved
913
+ nf++;
830
914
 
831
915
  if (nf == nh) {
832
916
  break;
@@ -867,7 +951,7 @@ bool llama_kv_cache_unified::defrag_prepare(int32_t n_max_nodes) {
            cells[i0 + nf] = cell1;

            // clear the old cell and move the head there
-            cell1 = llama_kv_cell();
+            cell1 = kv_cell();
            head = n_used;

            if (!cont) {
@@ -895,13 +979,25 @@ bool llama_kv_cache_unified::defrag_prepare(int32_t n_max_nodes) {
        return false;
    }

-    LLAMA_LOG_DEBUG("(tmp log) KV defrag cell moves: %u\n", n_moves);
+    LLAMA_LOG_DEBUG("%s: (tmp log) KV defrag cell moves: %u\n", __func__, n_moves);

-    LLAMA_LOG_DEBUG("expected gf nodes: %u\n", 6*n_moves*n_layer);
+    LLAMA_LOG_DEBUG("%s: expected gf nodes: %u\n", __func__, 6*n_moves*n_layer);

    return true;
 }

+uint32_t llama_kv_cache_unified::cell_max() const {
+    for (uint32_t i = size; i > 0; --i) {
+        const kv_cell & cell = cells[i - 1];
+
+        if (cell.pos >= 0 && !cell.is_empty()) {
+            return i;
+        }
+    }
+
+    return 0;
+}
+
 void llama_kv_cache_unified::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
    std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
    uint32_t cell_count = 0;
@@ -1110,7 +1206,7 @@ bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t cell
        clear();

        for (uint32_t i = 0; i < cell_count; ++i) {
-            llama_kv_cell & cell = cells[i];
+            kv_cell & cell = cells[i];

            llama_pos pos;
            uint32_t n_seq_id;
@@ -1133,15 +1229,6 @@ bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t cell
                }

                cell.seq_id.insert(seq_id);
-
-                if (recurrent) {
-                    int32_t & tail = cells[seq_id].tail;
-                    if (tail != -1) {
-                        LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail);
-                        return false;
-                    }
-                    tail = i;
-                }
            }
        }

@@ -1149,14 +1236,6 @@ bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t cell
        used = cell_count;
    }

-    if (recurrent) {
-        for (uint32_t i = 0; i < cell_count; ++i) {
-            uint32_t cell_id = head + i;
-            // make sure the recurrent states will keep their restored state
-            cells[cell_id].src = cell_id;
-        }
-    }
-
    return true;
 }

@@ -1174,7 +1253,1034 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell
1174
1253
  LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size);
1175
1254
  return false;
1176
1255
  }
1177
- if (v_trans != (bool) v_trans) {
1256
+ if (this->v_trans != (bool) v_trans) {
1257
+ LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__);
1258
+ return false;
1259
+ }
1260
+
1261
+ // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
1262
+ for (uint32_t il = 0; il < n_layer; ++il) {
1263
+ const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
1264
+
1265
+ // Read type of key
1266
+ int32_t k_type_i_ref;
1267
+ io.read_to(&k_type_i_ref, sizeof(k_type_i_ref));
1268
+ const int32_t k_type_i = (int32_t) k_l[il]->type;
1269
+ if (k_type_i != k_type_i_ref) {
1270
+ LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il);
1271
+ return false;
1272
+ }
1273
+
1274
+ // Read row size of key
1275
+ uint64_t k_size_row_ref;
1276
+ io.read_to(&k_size_row_ref, sizeof(k_size_row_ref));
1277
+ const size_t k_size_row = lm_ggml_row_size(k_l[il]->type, n_embd_k_gqa);
1278
+ if (k_size_row != k_size_row_ref) {
1279
+ LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il);
1280
+ return false;
1281
+ }
1282
+
1283
+ if (cell_count) {
1284
+ // Read and set the keys for the whole cell range
1285
+ lm_ggml_backend_tensor_set(k_l[il], io.read(cell_count * k_size_row), head * k_size_row, cell_count * k_size_row);
1286
+ }
1287
+ }
1288
+
1289
+ if (!this->v_trans) {
1290
+ for (uint32_t il = 0; il < n_layer; ++il) {
1291
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
1292
+
1293
+ // Read type of value
1294
+ int32_t v_type_i_ref;
1295
+ io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
1296
+ const int32_t v_type_i = (int32_t)v_l[il]->type;
1297
+ if (v_type_i != v_type_i_ref) {
1298
+ LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
1299
+ return false;
1300
+ }
1301
+
1302
+ // Read row size of value
1303
+ uint64_t v_size_row_ref;
1304
+ io.read_to(&v_size_row_ref, sizeof(v_size_row_ref));
1305
+ const size_t v_size_row = lm_ggml_row_size(v_l[il]->type, n_embd_v_gqa);
1306
+ if (v_size_row != v_size_row_ref) {
1307
+ LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il);
1308
+ return false;
1309
+ }
1310
+
1311
+ if (cell_count) {
1312
+ // Read and set the values for the whole cell range
1313
+ lm_ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_row), head * v_size_row, cell_count * v_size_row);
1314
+ }
1315
+ }
1316
+ } else {
1317
+ // For each layer, read the values for each cell (transposed)
1318
+ for (uint32_t il = 0; il < n_layer; ++il) {
1319
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
1320
+
1321
+ // Read type of value
1322
+ int32_t v_type_i_ref;
1323
+ io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
1324
+ const int32_t v_type_i = (int32_t)v_l[il]->type;
1325
+ if (v_type_i != v_type_i_ref) {
1326
+ LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
1327
+ return false;
1328
+ }
1329
+
1330
+ // Read element size of value
1331
+ uint32_t v_size_el_ref;
1332
+ io.read_to(&v_size_el_ref, sizeof(v_size_el_ref));
1333
+ const size_t v_size_el = lm_ggml_type_size(v_l[il]->type);
1334
+ if (v_size_el != v_size_el_ref) {
1335
+ LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il);
1336
+ return false;
1337
+ }
1338
+
1339
+ // Read GQA embedding size
1340
+ uint32_t n_embd_v_gqa_ref;
1341
+ io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref));
1342
+ if (n_embd_v_gqa != n_embd_v_gqa_ref) {
1343
+ LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il);
1344
+ return false;
1345
+ }
1346
+
1347
+ if (cell_count) {
1348
+ // For each row in the transposed matrix, read the values for the whole cell range
1349
+ for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
1350
+ const size_t dst_offset = (head + j * size) * v_size_el;
1351
+ lm_ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el);
1352
+ }
1353
+ }
1354
+ }
1355
+ }
1356
+
1357
+ return true;
1358
+ }
1359
+
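The restore path above has to undo both storage layouts: when V is not transposed each cell is one contiguous row, while in the transposed case each embedding row holds one element per cell. A minimal, self-contained sketch of the offset arithmetic (variable names mirror the diff; every number is made up):

```cpp
#include <cstddef>
#include <cstdint>

// Non-transposed V: one contiguous row per cell, so a cell range that starts
// at `head` lands head * v_size_row bytes into the tensor.
static size_t offset_rowwise(uint32_t head, size_t v_size_row) {
    return (size_t) head * v_size_row;
}

// Transposed V: the tensor is laid out as [n_embd_v_gqa][kv_size], so element
// row j of the restored cell range starts (head + j * kv_size) elements in.
static size_t offset_transposed(uint32_t head, uint32_t j, uint32_t kv_size, size_t v_size_el) {
    return ((size_t) head + (size_t) j * kv_size) * v_size_el;
}

int main() {
    // head = 4, kv_size = 32, 2-byte elements, embedding row j = 3
    return (offset_rowwise(4, 64) == 256 && offset_transposed(4, 3, 32, 2) == 200) ? 0 : 1;
}
```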
1360
+ //
1361
+ // llama_kv_cache_recurrent
1362
+ //
1363
+
1364
+ llama_kv_cache_recurrent::llama_kv_cache_recurrent(
1365
+ const llama_model & model,
1366
+ lm_ggml_type type_k,
1367
+ lm_ggml_type type_v,
1368
+ bool offload,
1369
+ uint32_t kv_size) : hparams(model.hparams) {
1370
+ const int32_t n_layer = hparams.n_layer;
1371
+
1372
+ LLAMA_LOG_INFO("%s: kv_size = %d, type_k = '%s', type_v = '%s', n_layer = %d\n",
1373
+ __func__, kv_size, lm_ggml_type_name(type_k), lm_ggml_type_name(type_v), n_layer);
1374
+
1375
+ head = 0;
1376
+ size = kv_size;
1377
+ used = 0;
1378
+
1379
+ this->type_k = type_k;
1380
+ this->type_v = type_v;
1381
+
1382
+ cells.clear();
1383
+ cells.resize(kv_size);
1384
+
1385
+ // create a context for each buffer type
1386
+ std::map<lm_ggml_backend_buffer_type_t, lm_ggml_context *> ctx_map;
1387
+ auto ctx_for_buft = [&](lm_ggml_backend_buffer_type_t buft) -> lm_ggml_context * {
1388
+ auto it = ctx_map.find(buft);
1389
+ if (it == ctx_map.end()) {
1390
+ lm_ggml_init_params params = {
1391
+ /*.mem_size =*/ size_t(2u*n_layer*lm_ggml_tensor_overhead()),
1392
+ /*.mem_buffer =*/ NULL,
1393
+ /*.no_alloc =*/ true,
1394
+ };
1395
+
1396
+ lm_ggml_context * ctx = lm_ggml_init(params);
1397
+ if (!ctx) {
1398
+ return nullptr;
1399
+ }
1400
+
1401
+ ctx_map[buft] = ctx;
1402
+ ctxs.emplace_back(ctx);
1403
+
1404
+ return ctx;
1405
+ }
1406
+
1407
+ return it->second;
1408
+ };
1409
+
1410
+ k_l.reserve(n_layer);
1411
+ v_l.reserve(n_layer);
1412
+
1413
+ for (int i = 0; i < n_layer; i++) {
1414
+ const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
1415
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();
1416
+
1417
+ const char * dev_name = "CPU";
1418
+
1419
+ lm_ggml_backend_buffer_type_t buft = lm_ggml_backend_cpu_buffer_type();
1420
+
1421
+ if (offload) {
1422
+ auto * dev = model.dev_layer(i);
1423
+ buft = lm_ggml_backend_dev_buffer_type(dev);
1424
+
1425
+ dev_name = lm_ggml_backend_dev_name(dev);
1426
+ }
1427
+
1428
+ LLAMA_LOG_DEBUG("%s, layer %3d: dev = %s\n", __func__, i, dev_name);
1429
+
1430
+ lm_ggml_context * ctx = ctx_for_buft(buft);
1431
+ if (!ctx) {
1432
+ throw std::runtime_error("failed to create ggml context for kv cache");
1433
+ }
1434
+
1435
+ lm_ggml_tensor * k = lm_ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size);
1436
+ lm_ggml_tensor * v = lm_ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size);
1437
+ lm_ggml_format_name(k, "cache_k_l%d", i);
1438
+ lm_ggml_format_name(v, "cache_v_l%d", i);
1439
+ k_l.push_back(k);
1440
+ v_l.push_back(v);
1441
+ }
1442
+
1443
+ // allocate tensors and initialize the buffers to avoid NaNs in the padding
1444
+ for (auto it : ctx_map) {
1445
+ auto * buft = it.first;
1446
+ auto * ctx = it.second;
1447
+
1448
+ lm_ggml_backend_buffer_t buf = lm_ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
1449
+ if (!buf) {
1450
+ throw std::runtime_error("failed to allocate buffer for kv cache");
1451
+ }
1452
+ lm_ggml_backend_buffer_clear(buf, 0);
1453
+ LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, lm_ggml_backend_buffer_name(buf), lm_ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
1454
+ bufs.emplace_back(buf);
1455
+ }
1456
+
1457
+ {
1458
+ const size_t memory_size_k = size_k_bytes();
1459
+ const size_t memory_size_v = size_v_bytes();
1460
+
1461
+ LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
1462
+ (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
1463
+ lm_ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
1464
+ lm_ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
1465
+ }
1466
+ }
1467
+
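The constructor above allocates the recurrent K/V state as flat 1-D tensors, one fixed-size slot per cell and one tensor pair per layer. A minimal sketch of that sizing with purely hypothetical widths (none of these values come from a real model):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    // hypothetical recurrent layer: every width below is a placeholder
    const uint32_t n_embd_k_gqa = 0, n_embd_k_s = 4096;   // K side is all recurrent state here
    const uint32_t n_embd_v_gqa = 0, n_embd_v_s = 16384;  // V side likewise
    const uint32_t kv_size      = 8;                      // one state slot per parallel sequence

    // mirrors lm_ggml_new_tensor_1d(ctx, type, (n_embd_*_gqa + n_embd_*_s) * kv_size):
    // one fixed-size state row per cache cell, per layer
    const uint64_t k_elems = (uint64_t) (n_embd_k_gqa + n_embd_k_s) * kv_size;
    const uint64_t v_elems = (uint64_t) (n_embd_v_gqa + n_embd_v_s) * kv_size;

    printf("K elements per layer: %llu, V elements per layer: %llu\n",
           (unsigned long long) k_elems, (unsigned long long) v_elems);
    return 0;
}
```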
1468
+ void llama_kv_cache_recurrent::clear() {
1469
+ for (int32_t i = 0; i < (int32_t) size; ++i) {
1470
+ cells[i].pos = -1;
1471
+ cells[i].seq_id.clear();
1472
+ cells[i].src = -1;
1473
+ cells[i].tail = -1;
1474
+ }
1475
+ head = 0;
1476
+ used = 0;
1477
+
1478
+ for (auto & buf : bufs) {
1479
+ lm_ggml_backend_buffer_clear(buf.get(), 0);
1480
+ }
1481
+ }
1482
+
1483
+ bool llama_kv_cache_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
1484
+ uint32_t new_head = size;
1485
+
1486
+ if (p0 < 0) {
1487
+ p0 = 0;
1488
+ }
1489
+
1490
+ if (p1 < 0) {
1491
+ p1 = std::numeric_limits<llama_pos>::max();
1492
+ }
1493
+
1494
+ // models like Mamba or RWKV can't have a state partially erased
1495
+ if (seq_id >= (int64_t) size) {
1496
+ // could be fatal
1497
+ return false;
1498
+ }
1499
+ if (0 <= seq_id) {
1500
+ int32_t & tail_id = cells[seq_id].tail;
1501
+ if (tail_id >= 0) {
1502
+ const kv_cell & cell = cells[tail_id];
1503
+ // partial intersection is invalid
1504
+ if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) {
1505
+ return false;
1506
+ }
1507
+ // invalidate tails which will be cleared
1508
+ if (p0 <= cell.pos && cell.pos < p1) {
1509
+ tail_id = -1;
1510
+ }
1511
+ }
1512
+ } else {
1513
+ // the seq_id is negative, so the range should include everything or nothing
1514
+ if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits<llama_pos>::max())) {
1515
+ return false;
1516
+ }
1517
+ }
1518
+
1519
+ for (uint32_t i = 0; i < size; ++i) {
1520
+ if (cells[i].pos >= p0 && cells[i].pos < p1) {
1521
+ if (seq_id < 0) {
1522
+ cells[i].seq_id.clear();
1523
+ } else if (cells[i].has_seq_id(seq_id)) {
1524
+ cells[i].seq_id.erase(seq_id);
1525
+ } else {
1526
+ continue;
1527
+ }
1528
+ if (cells[i].is_empty()) {
1529
+ // keep count of the number of used cells
1530
+ if (cells[i].pos >= 0) {
1531
+ used--;
1532
+ }
1533
+ cells[i].pos = -1;
1534
+ cells[i].src = -1;
1535
+ if (new_head == size) {
1536
+ new_head = i;
1537
+ }
1538
+ }
1539
+ }
1540
+ }
1541
+
1542
+ // If we freed up a slot, set head to it so searching can start there.
1543
+ if (new_head != size && new_head < head) {
1544
+ head = new_head;
1545
+ }
1546
+
1547
+ return true;
1548
+ }
1549
+
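seq_rm above refuses to erase part of a recurrent state: a tail cell may only be dropped when the [p0, p1) range reaches all the way back over it, since models like Mamba or RWKV keep a single rolled-up state per sequence. A standalone sketch of that rule (types and numbers are illustrative):

```cpp
#include <cstdint>
#include <limits>

using llama_pos = int32_t; // stand-in for the llama.h typedef

// Sketch of the "partial intersection is invalid" rule from seq_rm above:
// a recurrent tail cell at position `pos` can only be removed if the [p0, p1)
// range covers it starting from the very beginning of the sequence.
static bool can_remove_range(llama_pos pos, llama_pos p0, llama_pos p1) {
    if (p0 < 0) { p0 = 0; }
    if (p1 < 0) { p1 = std::numeric_limits<llama_pos>::max(); }
    // same condition as in the diff: any cut that starts or ends mid-state is rejected
    return !((0 < p0 && p0 <= pos) || (0 < p1 && p1 <= pos));
}

int main() {
    // removing the whole sequence is allowed; trimming only its last tokens is not
    const bool full_range = can_remove_range(/*pos=*/41, /*p0=*/-1, /*p1=*/-1); // true
    const bool tail_only  = can_remove_range(/*pos=*/41, /*p0=*/32, /*p1=*/-1); // false
    return (full_range && !tail_only) ? 0 : 1;
}
```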
1550
+ void llama_kv_cache_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
1551
+ if (seq_id_src == seq_id_dst) {
1552
+ return;
1553
+ }
1554
+
1555
+ if (p0 < 0) {
1556
+ p0 = 0;
1557
+ }
1558
+
1559
+ if (p1 < 0) {
1560
+ p1 = std::numeric_limits<llama_pos>::max();
1561
+ }
1562
+
1563
+ if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) {
1564
+ kv_cell & tail_src = cells[seq_id_src];
1565
+ kv_cell & tail_dst = cells[seq_id_dst];
1566
+ if (tail_dst.tail >= 0) {
1567
+ // clear destination seq_id if it wasn't empty
1568
+ kv_cell & cell_dst = cells[tail_dst.tail];
1569
+
1570
+ cell_dst.seq_id.erase(seq_id_dst);
1571
+ tail_dst.tail = -1;
1572
+ if (cell_dst.seq_id.empty()) {
1573
+ cell_dst.pos = -1;
1574
+ cell_dst.src = -1;
1575
+ used -= 1;
1576
+ }
1577
+ }
1578
+ if (tail_src.tail >= 0) {
1579
+ kv_cell & cell_src = cells[tail_src.tail];
1580
+
1581
+ cell_src.seq_id.insert(seq_id_dst);
1582
+ tail_dst.tail = tail_src.tail;
1583
+ }
1584
+ }
1585
+ }
1586
+
1587
+ void llama_kv_cache_recurrent::seq_keep(llama_seq_id seq_id) {
1588
+ uint32_t new_head = size;
1589
+
1590
+ for (uint32_t i = 0; i < size; ++i) {
1591
+ if ((llama_seq_id) i != seq_id) {
1592
+ cells[i].tail = -1;
1593
+ }
1594
+
1595
+ if (!cells[i].has_seq_id(seq_id)) {
1596
+ if (cells[i].pos >= 0) {
1597
+ used--;
1598
+ }
1599
+
1600
+ cells[i].pos = -1;
1601
+ cells[i].src = -1;
1602
+ cells[i].seq_id.clear();
1603
+
1604
+ if (new_head == size){
1605
+ new_head = i;
1606
+ }
1607
+ } else {
1608
+ cells[i].seq_id.clear();
1609
+ cells[i].seq_id.insert(seq_id);
1610
+ }
1611
+ }
1612
+
1613
+ // If we freed up a slot, set head to it so searching can start there.
1614
+ if (new_head != size && new_head < head) {
1615
+ head = new_head;
1616
+ }
1617
+ }
1618
+
1619
+ void llama_kv_cache_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) {
1620
+ if (delta == 0) {
1621
+ return;
1622
+ }
1623
+
1624
+ if (p0 < 0) {
1625
+ p0 = 0;
1626
+ }
1627
+
1628
+ if (p1 < 0) {
1629
+ p1 = std::numeric_limits<llama_pos>::max();
1630
+ }
1631
+
1632
+ // If there is no range then return early to avoid looping over the cache.
1633
+ if (p0 == p1) {
1634
+ return;
1635
+ }
1636
+
1637
+ // for Mamba-like or RWKV models, only the pos needs to be shifted
1638
+ if (0 <= seq_id && seq_id < (int64_t) size) {
1639
+ const int32_t tail_id = cells[seq_id].tail;
1640
+ if (tail_id >= 0) {
1641
+ kv_cell & cell = cells[tail_id];
1642
+ if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
1643
+ cell.pos += delta;
1644
+ }
1645
+ }
1646
+ }
1647
+ }
1648
+
1649
+ void llama_kv_cache_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
1650
+ if (d == 1) {
1651
+ return;
1652
+ }
1653
+
1654
+ if (p0 < 0) {
1655
+ p0 = 0;
1656
+ }
1657
+
1658
+ if (p1 < 0) {
1659
+ p1 = std::numeric_limits<llama_pos>::max();
1660
+ }
1661
+
1662
+ // If there is no range then return early to avoid looping over the cache.
1663
+ if (p0 == p1) {
1664
+ return;
1665
+ }
1666
+
1667
+ // for Mamba-like or RWKV models, only the pos needs to be changed
1668
+ if (0 <= seq_id && seq_id < (int64_t) size) {
1669
+ const int32_t tail_id = cells[seq_id].tail;
1670
+ if (tail_id >= 0) {
1671
+ kv_cell & cell = cells[tail_id];
1672
+ if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
1673
+ cell.pos /= d;
1674
+ }
1675
+ }
1676
+ }
1677
+ }
1678
+
1679
+ llama_pos llama_kv_cache_recurrent::seq_pos_max(llama_seq_id seq_id) const {
1680
+ llama_pos result = 0;
1681
+
1682
+ for (uint32_t i = 0; i < size; ++i) {
1683
+ if (cells[i].has_seq_id(seq_id)) {
1684
+ result = std::max(result, cells[i].pos);
1685
+ }
1686
+ }
1687
+
1688
+ return result;
1689
+ }
1690
+
1691
+ void llama_kv_cache_recurrent::restore() {
1692
+ if (pending.ranges.empty()) {
1693
+ return;
1694
+ }
1695
+
1696
+ seq_rm(-1, -1, -1);
1697
+ }
1698
+
1699
+ void llama_kv_cache_recurrent::commit() {
1700
+ pending.ranges.clear();
1701
+ }
1702
+
1703
+ bool llama_kv_cache_recurrent::update(llama_context & lctx) {
1704
+ LM_GGML_UNUSED(lctx);
1705
+ return false;
1706
+ }
1707
+
1708
+ void llama_kv_cache_recurrent::defrag_sched(float thold) {
1709
+ LM_GGML_UNUSED(thold);
1710
+ // noop
1711
+ }
1712
+
1713
+ void llama_kv_cache_recurrent::set_full() {
1714
+ n = size;
1715
+ }
1716
+
1717
+ llama_sbatch llama_kv_cache_recurrent::sbatch_init(
1718
+ const llama_batch & batch,
1719
+ bool logits_all) {
1720
+ return llama_sbatch(batch, hparams.n_embd, false, logits_all);
1721
+ }
1722
+
1723
+ llama_ubatch llama_kv_cache_recurrent::ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const {
1724
+ if (embd_pooled) {
1725
+ // Pooled embeddings cannot be split across ubatches (yet)
1726
+ return sbatch.split_seq(n_ubatch);
1727
+ }
1728
+
1729
+ return sbatch.split_equal(n_ubatch);
1730
+ }
1731
+
1732
+ bool llama_kv_cache_recurrent::find_slot(
1733
+ const llama_ubatch & ubatch) {
1734
+ const uint32_t n_tokens = ubatch.n_tokens;
1735
+ const uint32_t n_seqs = ubatch.n_seqs;
1736
+
1737
+ const uint32_t n_seq_tokens = ubatch.n_seq_tokens;
1738
+
1739
+ // if we have enough unused cells before the current head ->
1740
+ // better to start searching from the beginning of the cache, hoping to fill it
1741
+ if (head > used + 2*n_tokens) {
1742
+ head = 0;
1743
+ }
1744
+
1745
+ // For recurrent state architectures (like Mamba or RWKV),
1746
+ // each cache cell can store the state for a whole sequence.
1747
+ // A slot should always be contiguous.
1748
+
1749
+ // can only process batches with an equal number of new tokens in each sequence
1750
+ LM_GGML_ASSERT(ubatch.equal_seqs);
1751
+
1752
+ int32_t min = size - 1;
1753
+ int32_t max = 0;
1754
+
1755
+ // everything should fit if all seq_ids are smaller than the max
1756
+ for (uint32_t s = 0; s < n_seqs; ++s) {
1757
+ const uint32_t n_seq_id = ubatch.n_seq_id[s];
1758
+ for (uint32_t j = 0; j < n_seq_id; ++j) {
1759
+ const llama_seq_id seq_id = ubatch.seq_id[s][j];
1760
+
1761
+ if (seq_id < 0 || (uint32_t) seq_id >= size) {
1762
+ // too big seq_id
1763
+ // TODO: would it be possible to resize the cache instead?
1764
+ LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%d. Try using a bigger --parallel value\n", __func__, seq_id, size);
1765
+ return false;
1766
+ }
1767
+ if (j > 0) {
1768
+ kv_cell & seq = cells[seq_id];
1769
+ if (seq.tail >= 0) {
1770
+ kv_cell & cell = cells[seq.tail];
1771
+ // clear cells from seq_ids that become shared
1772
+ // (should not normally happen, but let's handle it anyway)
1773
+ cell.seq_id.erase(seq_id);
1774
+ seq.tail = -1;
1775
+ if (cell.seq_id.empty()) {
1776
+ cell.pos = -1;
1777
+ cell.src = -1;
1778
+ used -= 1;
1779
+ }
1780
+ }
1781
+ }
1782
+ }
1783
+ }
1784
+
1785
+ #ifndef NDEBUG
1786
+ {
1787
+ std::vector<int32_t> tails_verif;
1788
+ tails_verif.assign(size, -1);
1789
+ for (uint32_t i = 0; i < size; ++i) {
1790
+ kv_cell & cell = cells[i];
1791
+ for (llama_seq_id seq_id : cell.seq_id) {
1792
+ if (tails_verif[seq_id] != -1) {
1793
+ LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]);
1794
+ }
1795
+ tails_verif[seq_id] = i;
1796
+ }
1797
+ }
1798
+ for (uint32_t i = 0; i < size; ++i) {
1799
+ if (tails_verif[i] != cells[i].tail) {
1800
+ LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cells[i].tail, tails_verif[i]);
1801
+ }
1802
+ }
1803
+ }
1804
+ #endif
1805
+
1806
+ // find next empty cell
1807
+ uint32_t next_empty_cell = head;
1808
+
1809
+ for (uint32_t i = 0; i < size; ++i) {
1810
+ if (next_empty_cell >= size) { next_empty_cell -= size; }
1811
+ kv_cell & cell = cells[next_empty_cell];
1812
+ if (cell.is_empty()) { break; }
1813
+ next_empty_cell += 1;
1814
+ }
1815
+
1816
+ // find usable cell range
1817
+ for (uint32_t s = 0; s < n_seqs; ++s) {
1818
+ const llama_seq_id seq_id = ubatch.seq_id[s][0];
1819
+ kv_cell & seq_meta = cells[seq_id];
1820
+ bool has_cell = false;
1821
+ if (seq_meta.tail >= 0) {
1822
+ kv_cell & cell = cells[seq_meta.tail];
1823
+ LM_GGML_ASSERT(cell.has_seq_id(seq_id));
1824
+ // does this seq_id "own" the cell?
1825
+ if (cell.seq_id.size() == 1) { has_cell = true; }
1826
+ }
1827
+ if (!has_cell) {
1828
+ kv_cell & empty_cell = cells[next_empty_cell];
1829
+ LM_GGML_ASSERT(empty_cell.is_empty());
1830
+ // copy old tail into the empty cell
1831
+ if (seq_meta.tail >= 0) {
1832
+ kv_cell & orig_cell = cells[seq_meta.tail];
1833
+ empty_cell.pos = orig_cell.pos;
1834
+ empty_cell.src = orig_cell.src;
1835
+ orig_cell.seq_id.erase(seq_id);
1836
+ empty_cell.seq_id.insert(seq_id); // will be overwritten
1837
+ }
1838
+ seq_meta.tail = next_empty_cell;
1839
+ // find next empty cell
1840
+ if (s + 1 < n_seqs) {
1841
+ next_empty_cell += 1;
1842
+ for (uint32_t i = 0; i < size; ++i) {
1843
+ if (next_empty_cell >= size) { next_empty_cell -= size; }
1844
+ kv_cell & cell = cells[next_empty_cell];
1845
+ if (cell.is_empty()) { break; }
1846
+ next_empty_cell += 1;
1847
+ }
1848
+ }
1849
+ }
1850
+ if (min > seq_meta.tail) { min = seq_meta.tail; }
1851
+ if (max < seq_meta.tail) { max = seq_meta.tail; }
1852
+ }
1853
+
1854
+ // gather and re-order
1855
+ for (uint32_t s = 0; s < n_seqs; ++s) {
1856
+ int32_t dst_id = s + min;
1857
+ int32_t src_id = cells[ubatch.seq_id[s][0]].tail;
1858
+ if (dst_id != src_id) {
1859
+ kv_cell & dst_cell = cells[dst_id];
1860
+ kv_cell & src_cell = cells[src_id];
1861
+
1862
+ std::swap(dst_cell.pos, src_cell.pos);
1863
+ std::swap(dst_cell.src, src_cell.src);
1864
+ std::swap(dst_cell.seq_id, src_cell.seq_id);
1865
+
1866
+ // swap tails (assuming they NEVER overlap)
1867
+ for (const llama_seq_id seq_id : src_cell.seq_id) {
1868
+ cells[seq_id].tail = src_id;
1869
+ }
1870
+ for (const llama_seq_id seq_id : dst_cell.seq_id) {
1871
+ cells[seq_id].tail = dst_id;
1872
+ }
1873
+ }
1874
+ }
1875
+
1876
+ // update the pos of the used seqs
1877
+ for (uint32_t s = 0; s < n_seqs; ++s) {
1878
+ const llama_pos last_pos = ubatch.pos[n_seq_tokens * s + n_seq_tokens - 1];
1879
+ int32_t cell_id = s + min;
1880
+ kv_cell & cell = cells[cell_id];
1881
+
1882
+ if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) {
1883
+ // What should happen when the pos backtracks or skips a value?
1884
+ // Clearing the state mid-batch would require special-casing which isn't done.
1885
+ LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
1886
+ __func__, last_pos, cell.pos, ubatch.seq_id[s][0], n_seq_tokens);
1887
+ }
1888
+ cell.pos = last_pos;
1889
+ cell.seq_id.clear();
1890
+ for (int32_t j = 0; j < ubatch.n_seq_id[s]; ++j) {
1891
+ const llama_seq_id seq_id = ubatch.seq_id[s][j];
1892
+ cell.seq_id.insert(seq_id);
1893
+ cells[seq_id].tail = cell_id;
1894
+ }
1895
+ }
1896
+
1897
+ // allow getting the range of used cells, from head to head + n
1898
+ head = min;
1899
+ n = max - min + 1;
1900
+ used = std::count_if(cells.begin(), cells.end(),
1901
+ [](const kv_cell & cell){ return !cell.is_empty(); });
1902
+
1903
+ // sanity check
1904
+ return n >= n_seqs;
1905
+ }
1906
+
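find_slot above packs the state cell of every sequence in the ubatch into one contiguous window, so the graph can address the batch as cells [head, head + n). A heavily simplified sketch of the gather-and-reorder step, with the tail bookkeeping reduced to a plain seq-to-cell map (all data is invented):

```cpp
#include <cstdint>
#include <utility>
#include <vector>

// Heavily simplified "gather and re-order": swap each sequence's state cell
// into a contiguous window starting at `min`, tracking moves in cell_of_seq
// the way the diff updates the per-sequence tail indices.
struct toy_cell { int32_t pos; int32_t seq; };

int main() {
    std::vector<toy_cell> cells       = { {10, 2}, {-1, -1}, {7, 0}, {3, 1} };
    std::vector<int32_t>  cell_of_seq = { 2, 3, 0 }; // seq 0 -> cell 2, seq 1 -> cell 3, seq 2 -> cell 0
    const int32_t min    = 0;                        // start of the contiguous window
    const int32_t n_seqs = (int32_t) cell_of_seq.size();

    for (int32_t s = 0; s < n_seqs; ++s) {
        const int32_t dst_id = s + min;
        const int32_t src_id = cell_of_seq[s];
        if (dst_id != src_id) {
            std::swap(cells[dst_id], cells[src_id]);
            for (auto & c : cell_of_seq) {           // keep the map pointing at the moved states
                if      (c == dst_id) { c = src_id; }
                else if (c == src_id) { c = dst_id; }
            }
        }
    }

    // head = min and n = n_seqs now describe the packed window
    return (cells[0].seq == 0 && cells[1].seq == 1 && cells[2].seq == 2) ? 0 : 1;
}
```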
1907
+ int32_t llama_kv_cache_recurrent::get_n_tokens() const {
1908
+ int32_t result = 0;
1909
+
1910
+ for (uint32_t i = 0; i < size; i++) {
1911
+ result += cells[i].seq_id.size();
1912
+ }
1913
+
1914
+ return result;
1915
+ }
1916
+
1917
+ int32_t llama_kv_cache_recurrent::get_used_cells() const {
1918
+ return used;
1919
+ }
1920
+
1921
+ llama_pos llama_kv_cache_recurrent::get_pos_max() const {
1922
+ llama_pos pos_max = -1;
1923
+ for (const auto & cell : cells) {
1924
+ pos_max = std::max(pos_max, cell.pos);
1925
+ }
1926
+
1927
+ return pos_max;
1928
+ }
1929
+
1930
+ bool llama_kv_cache_recurrent::get_can_shift() const {
1931
+ return false;
1932
+ }
1933
+
1934
+ int32_t llama_kv_cache_recurrent::s_copy(int i) const {
1935
+ const uint32_t cell_id = i + head;
1936
+
1937
+ //////////////////////////////////////////////
1938
+ // TODO: this should not mutate the KV cache !
1939
+ kv_cell & cell = const_cast<kv_cell &>(cells[cell_id]);
1940
+
1941
+ // prevent out-of-bound sources
1942
+ if (cell.src < 0 || (uint32_t) cell.src >= size) {
1943
+ cell.src = cell_id;
1944
+ }
1945
+
1946
+ int32_t res = cell.src;
1947
+
1948
+ // TODO: do not mutate the KV cache
1949
+ // ensure copy only happens once
1950
+ if (cell.src != (int32_t) cell_id) {
1951
+ cell.src = cell_id;
1952
+ }
1953
+
1954
+ return res;
1955
+ }
1956
+
1957
+ float llama_kv_cache_recurrent::s_mask(int i) const {
1958
+ const uint32_t cell_id = i + head;
1959
+
1960
+ //////////////////////////////////////////////
1961
+ // TODO: this should not mutate the KV cache !
1962
+ kv_cell & cell = const_cast<kv_cell &>(cells[cell_id]);
1963
+
1964
+ float res = (float) (cell.src >= 0);
1965
+
1966
+ // only clear once
1967
+ if (cell.src < 0) {
1968
+ cell.src = cell_id;
1969
+ }
1970
+
1971
+ return res;
1972
+ }
1973
+
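s_copy above is deliberately one-shot: the first query for a cell reports where its state should be copied from, then the source is pinned to the cell itself so later graph builds see a no-op; s_mask follows the same clear-once pattern. A minimal sketch of that behaviour (the helper and its data are illustrative only):

```cpp
#include <cstdint>
#include <vector>

// Minimal sketch of the s_copy behaviour above: the first query for a cell
// returns where its state should be copied from, and the source is then
// pinned to the cell itself so subsequent queries report a no-op copy.
struct toy_cell { int32_t src = -1; };

static int32_t s_copy_like(std::vector<toy_cell> & cells, uint32_t head, int i) {
    const uint32_t cell_id = i + head;
    toy_cell & cell = cells[cell_id];
    if (cell.src < 0 || (uint32_t) cell.src >= cells.size()) {
        cell.src = (int32_t) cell_id;      // out-of-bounds source -> copy from itself
    }
    const int32_t res = cell.src;
    if (cell.src != (int32_t) cell_id) {
        cell.src = (int32_t) cell_id;      // ensure the copy only happens once
    }
    return res;
}

int main() {
    std::vector<toy_cell> cells(4);
    cells[2].src = 1;                      // cell 2 should start from cell 1's state
    const int32_t first  = s_copy_like(cells, /*head=*/2, /*i=*/0); // -> 1 (copy once)
    const int32_t second = s_copy_like(cells, /*head=*/2, /*i=*/0); // -> 2 (no-op afterwards)
    return (first == 1 && second == 2) ? 0 : 1;
}
```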
1974
+ uint32_t llama_kv_cache_recurrent::cell_max() const {
1975
+ for (uint32_t i = size; i > 0; --i) {
1976
+ const kv_cell & cell = cells[i - 1];
1977
+
1978
+ if (cell.pos >= 0 && !cell.is_empty()) {
1979
+ return i;
1980
+ }
1981
+ }
1982
+
1983
+ return 0;
1984
+ }
1985
+
1986
+ size_t llama_kv_cache_recurrent::total_size() const {
1987
+ size_t size = 0;
1988
+ for (const auto & buf : bufs) {
1989
+ size += lm_ggml_backend_buffer_get_size(buf.get());
1990
+ }
1991
+
1992
+ return size;
1993
+ }
1994
+
1995
+ size_t llama_kv_cache_recurrent::size_k_bytes() const {
1996
+ size_t size_k_bytes = 0;
1997
+
1998
+ for (const auto & k : k_l) {
1999
+ size_k_bytes += lm_ggml_nbytes(k);
2000
+ }
2001
+
2002
+ return size_k_bytes;
2003
+ }
2004
+
2005
+ size_t llama_kv_cache_recurrent::size_v_bytes() const {
2006
+ size_t size_v_bytes = 0;
2007
+
2008
+ for (const auto & v : v_l) {
2009
+ size_v_bytes += lm_ggml_nbytes(v);
2010
+ }
2011
+
2012
+ return size_v_bytes;
2013
+ }
2014
+
2015
+ void llama_kv_cache_recurrent::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
2016
+ std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
2017
+ uint32_t cell_count = 0;
2018
+
2019
+ // Count the number of cells with the specified seq_id
2020
+ // Find all the ranges of cells with this seq id (or all, when -1)
2021
+ uint32_t cell_range_begin = size;
2022
+ for (uint32_t i = 0; i < size; ++i) {
2023
+ const auto & cell = cells[i];
2024
+ if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) {
2025
+ ++cell_count;
2026
+ if (cell_range_begin == size) {
2027
+ cell_range_begin = i;
2028
+ }
2029
+ } else {
2030
+ if (cell_range_begin != size) {
2031
+ cell_ranges.emplace_back(cell_range_begin, i);
2032
+ cell_range_begin = size;
2033
+ }
2034
+ }
2035
+ }
2036
+ if (cell_range_begin != size) {
2037
+ cell_ranges.emplace_back(cell_range_begin, size);
2038
+ }
2039
+
2040
+ // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
2041
+ uint32_t cell_count_check = 0;
2042
+ for (const auto & range : cell_ranges) {
2043
+ cell_count_check += range.second - range.first;
2044
+ }
2045
+ LM_GGML_ASSERT(cell_count == cell_count_check);
2046
+
2047
+ io.write(&cell_count, sizeof(cell_count));
2048
+
2049
+ state_write_meta(io, cell_ranges, seq_id);
2050
+ state_write_data(io, cell_ranges);
2051
+ }
2052
+
2053
+ void llama_kv_cache_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
2054
+ uint32_t cell_count;
2055
+ io.read_to(&cell_count, sizeof(cell_count));
2056
+
2057
+ bool res = true;
2058
+ res = res && state_read_meta(io, cell_count, seq_id);
2059
+ res = res && state_read_data(io, cell_count);
2060
+
2061
+ if (!res) {
2062
+ if (seq_id == -1) {
2063
+ clear();
2064
+ } else {
2065
+ seq_rm(seq_id, -1, -1);
2066
+ }
2067
+ throw std::runtime_error("failed to restore kv cache");
2068
+ }
2069
+ }
2070
+
2071
+ void llama_kv_cache_recurrent::state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id) const {
2072
+ for (const auto & range : cell_ranges) {
2073
+ for (uint32_t i = range.first; i < range.second; ++i) {
2074
+ const auto & cell = cells[i];
2075
+ const llama_pos pos = cell.pos;
2076
+ const uint32_t n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0;
2077
+
2078
+ io.write(&pos, sizeof(pos));
2079
+ io.write(&n_seq_id, sizeof(n_seq_id));
2080
+
2081
+ if (n_seq_id) {
2082
+ for (auto seq_id : cell.seq_id) {
2083
+ io.write(&seq_id, sizeof(seq_id));
2084
+ }
2085
+ }
2086
+ }
2087
+ }
2088
+ }
2089
+
2090
+ void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const {
2091
+ const uint32_t v_trans = 0;
2092
+ const uint32_t n_layer = hparams.n_layer;
2093
+
2094
+ io.write(&v_trans, sizeof(v_trans));
2095
+ io.write(&n_layer, sizeof(n_layer));
2096
+
2097
+ std::vector<uint8_t> tmp_buf;
2098
+
2099
+ // Iterate and write all the keys first, each row is a cell
2100
+ // Get whole range at a time
2101
+ for (uint32_t il = 0; il < n_layer; ++il) {
2102
+ const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
2103
+
2104
+ // Write key type
2105
+ const int32_t k_type_i = (int32_t)k_l[il]->type;
2106
+ io.write(&k_type_i, sizeof(k_type_i));
2107
+
2108
+ // Write row size of key
2109
+ const uint64_t k_size_row = lm_ggml_row_size(k_l[il]->type, n_embd_k_gqa);
2110
+ io.write(&k_size_row, sizeof(k_size_row));
2111
+
2112
+ // Write out each range of cells, k_size_row bytes per cell, directly from the tensor
2113
+ for (const auto & range : cell_ranges) {
2114
+ const size_t range_size = range.second - range.first;
2115
+ const size_t buf_size = range_size * k_size_row;
2116
+ io.write_tensor(k_l[il], range.first * k_size_row, buf_size);
2117
+ }
2118
+ }
2119
+
2120
+ if (!v_trans) {
2121
+ for (uint32_t il = 0; il < n_layer; ++il) {
2122
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
2123
+
2124
+ // Write value type
2125
+ const int32_t v_type_i = (int32_t)v_l[il]->type;
2126
+ io.write(&v_type_i, sizeof(v_type_i));
2127
+
2128
+ // Write row size of value
2129
+ const uint64_t v_size_row = lm_ggml_row_size(v_l[il]->type, n_embd_v_gqa);
2130
+ io.write(&v_size_row, sizeof(v_size_row));
2131
+
2132
+ // Write out each range of cells, v_size_row bytes per cell, directly from the tensor
2133
+ for (const auto & range : cell_ranges) {
2134
+ const size_t range_size = range.second - range.first;
2135
+ const size_t buf_size = range_size * v_size_row;
2136
+ io.write_tensor(v_l[il], range.first * v_size_row, buf_size);
2137
+ }
2138
+ }
2139
+ } else {
2140
+ // When v is transposed, we also need the element size and get the element ranges from each row
2141
+ const uint32_t kv_size = size;
2142
+ for (uint32_t il = 0; il < n_layer; ++il) {
2143
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
2144
+
2145
+ // Write value type
2146
+ const int32_t v_type_i = (int32_t)v_l[il]->type;
2147
+ io.write(&v_type_i, sizeof(v_type_i));
2148
+
2149
+ // Write element size
2150
+ const uint32_t v_size_el = lm_ggml_type_size(v_l[il]->type);
2151
+ io.write(&v_size_el, sizeof(v_size_el));
2152
+
2153
+ // Write GQA embedding size
2154
+ io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));
2155
+
2156
+ // For each row, we get the element values of each cell
2157
+ for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
2158
+ // Write out each range of cells, v_size_el bytes per cell in this row, directly from the tensor
2159
+ for (const auto & range : cell_ranges) {
2160
+ const size_t range_size = range.second - range.first;
2161
+ const size_t src_offset = (range.first + j * kv_size) * v_size_el;
2162
+ const size_t buf_size = range_size * v_size_el;
2163
+ io.write_tensor(v_l[il], src_offset, buf_size);
2164
+ }
2165
+ }
2166
+ }
2167
+ }
2168
+ }
2169
+
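Taken together, state_write, state_write_meta and state_write_data above emit a simple length-prefixed stream: per-cell metadata first, then the per-layer K and V payloads (n_seq_id is written as 0 when a single sequence is saved). A rough sketch of that layout plus a reader skeleton for the metadata part only (the ReadFn callback and every name here are illustrative, not part of the package):

```cpp
#include <cstdint>
#include <vector>

// Rough layout of the stream written above (field names are descriptive only):
//
//   uint32 cell_count
//   cell_count * { int32 pos, uint32 n_seq_id, n_seq_id * int32 seq_id }
//   uint32 v_trans (always 0 for the recurrent cache), uint32 n_layer
//   n_layer * { int32 k_type, uint64 k_size_row, key rows for each cell range }
//   n_layer * { int32 v_type, uint64 v_size_row, value rows for each cell range }
//
// Minimal reader skeleton for the per-cell metadata only.
struct cell_meta {
    int32_t              pos;
    std::vector<int32_t> seq_ids;
};

template <typename ReadFn> // ReadFn(void * dst, size_t n) fills dst with n bytes
std::vector<cell_meta> read_cell_meta(ReadFn && read_to, uint32_t cell_count) {
    std::vector<cell_meta> out(cell_count);
    for (auto & c : out) {
        uint32_t n_seq_id = 0;
        read_to(&c.pos, sizeof(c.pos));
        read_to(&n_seq_id, sizeof(n_seq_id));
        c.seq_ids.resize(n_seq_id);
        for (auto & s : c.seq_ids) {
            read_to(&s, sizeof(s));
        }
    }
    return out;
}

int main() { return 0; } // the template above is illustrative and never instantiated here
```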
2170
+ bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) {
2171
+ if (dest_seq_id != -1) {
2172
+ // single sequence
2173
+
2174
+ seq_rm(dest_seq_id, -1, -1);
2175
+
2176
+ llama_sbatch sbatch;
2177
+ llama_ubatch batch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false);
2178
+
2179
+ batch.n_tokens = cell_count;
2180
+ batch.n_seq_tokens = cell_count;
2181
+ batch.n_seqs = 1;
2182
+
2183
+ for (uint32_t i = 0; i < cell_count; ++i) {
2184
+ llama_pos pos;
2185
+ uint32_t n_seq_id;
2186
+
2187
+ io.read_to(&pos, sizeof(pos));
2188
+ io.read_to(&n_seq_id, sizeof(n_seq_id));
2189
+
2190
+ if (n_seq_id != 0) {
2191
+ LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
2192
+ return false;
2193
+ }
2194
+
2195
+ batch.pos[i] = pos;
2196
+ }
2197
+ batch.n_seq_id[0] = 1;
2198
+ batch.seq_id[0] = &dest_seq_id;
2199
+ if (!find_slot(batch)) {
2200
+ LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
2201
+ return false;
2202
+ }
2203
+ commit();
2204
+
2205
+ // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
2206
+ // Assume that this is one contiguous block of cells
2207
+ LM_GGML_ASSERT(head + cell_count <= size);
2208
+ LM_GGML_ASSERT(cells[head].pos == batch.pos[0]);
2209
+ LM_GGML_ASSERT(cells[head + cell_count - 1].pos == batch.pos[cell_count - 1]);
2210
+ LM_GGML_ASSERT(cells[head].has_seq_id(dest_seq_id));
2211
+ LM_GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id));
2212
+ } else {
2213
+ // whole KV cache restore
2214
+
2215
+ if (cell_count > size) {
2216
+ LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
2217
+ return false;
2218
+ }
2219
+
2220
+ clear();
2221
+
2222
+ for (uint32_t i = 0; i < cell_count; ++i) {
2223
+ kv_cell & cell = cells[i];
2224
+
2225
+ llama_pos pos;
2226
+ uint32_t n_seq_id;
2227
+
2228
+ io.read_to(&pos, sizeof(pos));
2229
+ io.read_to(&n_seq_id, sizeof(n_seq_id));
2230
+
2231
+ cell.pos = pos;
2232
+
2233
+ for (uint32_t j = 0; j < n_seq_id; ++j) {
2234
+ llama_seq_id seq_id;
2235
+ io.read_to(&seq_id, sizeof(seq_id));
2236
+
2237
+ // TODO: llama_kv_cache_recurrent should have a notion of max sequences
2238
+ //if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) {
2239
+ if (seq_id < 0) {
2240
+ //LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx));
2241
+ LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, inf)\n", __func__, seq_id);
2242
+ return false;
2243
+ }
2244
+
2245
+ cell.seq_id.insert(seq_id);
2246
+
2247
+ int32_t & tail = cells[seq_id].tail;
2248
+ if (tail != -1) {
2249
+ LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail);
2250
+ return false;
2251
+ }
2252
+ tail = i;
2253
+ }
2254
+ }
2255
+
2256
+ head = 0;
2257
+ used = cell_count;
2258
+ }
2259
+
2260
+ for (uint32_t i = 0; i < cell_count; ++i) {
2261
+ uint32_t cell_id = head + i;
2262
+ // make sure the recurrent states will keep their restored state
2263
+ cells[cell_id].src = cell_id;
2264
+ }
2265
+
2266
+ return true;
2267
+ }
2268
+
2269
+ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell_count) {
2270
+ uint32_t v_trans;
2271
+ uint32_t n_layer;
2272
+ io.read_to(&v_trans, sizeof(v_trans));
2273
+ io.read_to(&n_layer, sizeof(n_layer));
2274
+
2275
+ if (n_layer != hparams.n_layer) {
2276
+ LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer);
2277
+ return false;
2278
+ }
2279
+ if (cell_count > size) {
2280
+ LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size);
2281
+ return false;
2282
+ }
2283
+ if (false != (bool) v_trans) {
1178
2284
  LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__);
1179
2285
  return false;
1180
2286
  }
@@ -1326,7 +2432,7 @@ void llama_kv_cache_view_update(llama_kv_cache_view * view, const llama_kv_cache
1326
2432
  view->cells_sequences = (llama_seq_id *)p;
1327
2433
  }
1328
2434
 
1329
- const std::vector<llama_kv_cell> & kv_cells = kvu->cells;
2435
+ const std::vector<llama_kv_cache_unified::kv_cell> & kv_cells = kvu->cells;
1330
2436
  llama_kv_cache_view_cell * c_curr = view->cells;
1331
2437
  llama_seq_id * cs_curr = view->cells_sequences;
1332
2438
  int32_t used_cells = 0;