cui-llama.rn 1.6.0 → 1.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (285)
  1. package/README.md +35 -7
  2. package/android/src/main/CMakeLists.txt +22 -11
  3. package/android/src/main/java/com/rnllama/LlamaContext.java +42 -6
  4. package/android/src/main/java/com/rnllama/RNLlama.java +139 -4
  5. package/android/src/main/jni.cpp +173 -18
  6. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  9. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  10. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  11. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  12. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  13. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  14. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +24 -4
  15. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +22 -2
  16. package/cpp/LICENSE +21 -0
  17. package/cpp/chat.cpp +129 -107
  18. package/cpp/chat.h +2 -0
  19. package/cpp/common.cpp +58 -78
  20. package/cpp/common.h +29 -21
  21. package/cpp/ggml-alloc.c +4 -1
  22. package/cpp/ggml-backend.cpp +9 -5
  23. package/cpp/ggml-backend.h +4 -4
  24. package/cpp/ggml-cpp.h +1 -1
  25. package/cpp/ggml-cpu/amx/amx.cpp +221 -0
  26. package/cpp/ggml-cpu/amx/amx.h +8 -0
  27. package/cpp/ggml-cpu/amx/common.h +91 -0
  28. package/cpp/ggml-cpu/amx/mmq.cpp +2511 -0
  29. package/cpp/ggml-cpu/amx/mmq.h +10 -0
  30. package/{ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers → cpp/ggml-cpu}/binary-ops.h +1 -1
  31. package/cpp/ggml-cpu/common.h +72 -0
  32. package/cpp/{ggml-cpu-aarch64.cpp → ggml-cpu/ggml-cpu-aarch64.cpp} +809 -103
  33. package/cpp/{ggml-cpu-quants.c → ggml-cpu/ggml-cpu-quants.c} +306 -6
  34. package/cpp/{ggml-cpu.c → ggml-cpu/ggml-cpu.c} +114 -55
  35. package/cpp/{ggml-cpu.cpp → ggml-cpu/ggml-cpu.cpp} +32 -16
  36. package/cpp/{ops.cpp → ggml-cpu/ops.cpp} +353 -173
  37. package/{ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers → cpp/ggml-cpu}/ops.h +2 -20
  38. package/cpp/{sgemm.cpp → ggml-cpu/sgemm.cpp} +501 -0
  39. package/{ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers → cpp/ggml-cpu}/simd-mappings.h +7 -3
  40. package/{ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers → cpp/ggml-cpu}/unary-ops.h +1 -1
  41. package/cpp/{vec.cpp → ggml-cpu/vec.cpp} +0 -6
  42. package/{ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers → cpp/ggml-cpu}/vec.h +16 -0
  43. package/cpp/ggml-cpu.h +5 -0
  44. package/cpp/ggml-impl.h +16 -9
  45. package/cpp/ggml-llama-sim.metallib +0 -0
  46. package/cpp/ggml-llama.metallib +0 -0
  47. package/cpp/ggml-metal-impl.h +36 -11
  48. package/cpp/ggml-metal.m +810 -176
  49. package/cpp/ggml-opt.cpp +373 -190
  50. package/cpp/ggml-opt.h +49 -28
  51. package/cpp/ggml-quants.c +0 -6
  52. package/cpp/ggml.c +227 -282
  53. package/cpp/ggml.h +82 -101
  54. package/cpp/gguf.cpp +33 -33
  55. package/cpp/json-schema-to-grammar.cpp +3 -0
  56. package/cpp/llama-adapter.cpp +6 -0
  57. package/cpp/llama-arch.cpp +49 -17
  58. package/cpp/llama-arch.h +9 -0
  59. package/cpp/llama-batch.cpp +8 -2
  60. package/cpp/llama-batch.h +2 -1
  61. package/cpp/llama-chat.cpp +39 -16
  62. package/cpp/llama-chat.h +4 -2
  63. package/cpp/llama-context.cpp +440 -611
  64. package/cpp/llama-context.h +44 -33
  65. package/cpp/llama-cparams.h +1 -0
  66. package/cpp/llama-graph.cpp +214 -291
  67. package/cpp/llama-graph.h +69 -21
  68. package/cpp/llama-hparams.cpp +17 -1
  69. package/cpp/llama-hparams.h +39 -5
  70. package/cpp/llama-kv-cache.cpp +2067 -620
  71. package/cpp/llama-kv-cache.h +410 -108
  72. package/cpp/llama-memory.h +12 -1
  73. package/cpp/llama-model-loader.cpp +24 -15
  74. package/cpp/llama-model-saver.cpp +281 -0
  75. package/cpp/llama-model-saver.h +37 -0
  76. package/cpp/llama-model.cpp +1089 -359
  77. package/cpp/llama-model.h +19 -3
  78. package/cpp/llama-sampling.cpp +20 -7
  79. package/cpp/llama-vocab.cpp +54 -9
  80. package/cpp/llama-vocab.h +6 -0
  81. package/cpp/llama.cpp +14 -0
  82. package/cpp/llama.h +86 -142
  83. package/cpp/minja/chat-template.hpp +9 -5
  84. package/cpp/minja/minja.hpp +69 -36
  85. package/cpp/rn-llama.cpp +602 -190
  86. package/cpp/rn-llama.h +34 -8
  87. package/cpp/sampling.cpp +57 -50
  88. package/cpp/tools/mtmd/clip-impl.h +462 -0
  89. package/cpp/tools/mtmd/clip.cpp +4024 -0
  90. package/cpp/tools/mtmd/clip.h +101 -0
  91. package/cpp/tools/mtmd/miniaudio.h +93468 -0
  92. package/cpp/tools/mtmd/mtmd-audio.cpp +855 -0
  93. package/cpp/tools/mtmd/mtmd-audio.h +62 -0
  94. package/cpp/tools/mtmd/mtmd-helper.cpp +297 -0
  95. package/cpp/tools/mtmd/mtmd.cpp +942 -0
  96. package/cpp/tools/mtmd/mtmd.h +362 -0
  97. package/cpp/tools/mtmd/stb_image.h +7988 -0
  98. package/ios/CMakeLists.txt +20 -10
  99. package/ios/RNLlama.h +6 -0
  100. package/ios/RNLlama.mm +82 -3
  101. package/ios/RNLlamaContext.h +5 -1
  102. package/ios/RNLlamaContext.mm +131 -38
  103. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +2 -0
  104. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +29 -21
  105. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
  106. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpp.h +1 -1
  107. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu.h +5 -0
  108. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-impl.h +16 -9
  109. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  110. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
  111. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +82 -101
  112. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-arch.h +9 -0
  113. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-batch.h +2 -1
  114. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +4 -2
  115. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +44 -33
  116. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
  117. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +69 -21
  118. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +39 -5
  119. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +410 -108
  120. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +12 -1
  121. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
  122. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +19 -3
  123. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
  124. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +86 -142
  125. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  126. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
  127. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +34 -8
  128. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
  129. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  130. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
  131. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
  132. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +29 -21
  133. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
  134. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +1 -1
  135. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +5 -0
  136. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +16 -9
  137. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  138. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
  139. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +82 -101
  140. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +9 -0
  141. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +2 -1
  142. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +4 -2
  143. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +44 -33
  144. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
  145. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +69 -21
  146. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +39 -5
  147. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +410 -108
  148. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +12 -1
  149. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
  150. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +19 -3
  151. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
  152. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +86 -142
  153. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  154. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
  155. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +34 -8
  156. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  157. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
  158. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  159. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  160. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +2 -0
  161. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +29 -21
  162. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
  163. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpp.h +1 -1
  164. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu.h +5 -0
  165. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-impl.h +16 -9
  166. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  167. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
  168. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +82 -101
  169. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-arch.h +9 -0
  170. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-batch.h +2 -1
  171. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +4 -2
  172. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +44 -33
  173. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
  174. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +69 -21
  175. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +39 -5
  176. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +410 -108
  177. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +12 -1
  178. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
  179. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +19 -3
  180. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
  181. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +86 -142
  182. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  183. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
  184. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +34 -8
  185. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
  186. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  187. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
  188. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
  189. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +29 -21
  190. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
  191. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +1 -1
  192. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +5 -0
  193. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +16 -9
  194. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  195. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
  196. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +82 -101
  197. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +9 -0
  198. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +2 -1
  199. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +4 -2
  200. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +44 -33
  201. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
  202. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +69 -21
  203. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +39 -5
  204. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +410 -108
  205. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +12 -1
  206. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
  207. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +19 -3
  208. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
  209. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +86 -142
  210. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  211. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
  212. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +34 -8
  213. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  214. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
  215. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  216. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  217. package/jest/mock.js +33 -7
  218. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  219. package/lib/commonjs/index.js +153 -21
  220. package/lib/commonjs/index.js.map +1 -1
  221. package/lib/module/NativeRNLlama.js.map +1 -1
  222. package/lib/module/index.js +152 -20
  223. package/lib/module/index.js.map +1 -1
  224. package/lib/typescript/NativeRNLlama.d.ts +54 -4
  225. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  226. package/lib/typescript/index.d.ts +72 -6
  227. package/lib/typescript/index.d.ts.map +1 -1
  228. package/package.json +1 -1
  229. package/src/NativeRNLlama.ts +72 -4
  230. package/src/index.ts +212 -38
  231. package/cpp/binary-ops.h +0 -16
  232. package/cpp/ops.h +0 -128
  233. package/cpp/simd-mappings.h +0 -888
  234. package/cpp/unary-ops.h +0 -28
  235. package/cpp/vec.h +0 -802
  236. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/binary-ops.h +0 -16
  237. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
  238. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
  239. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
  240. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
  241. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ops.h +0 -128
  242. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sgemm.h +0 -14
  243. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/simd-mappings.h +0 -888
  244. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/vec.h +0 -802
  245. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
  246. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
  247. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
  248. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
  249. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +0 -14
  250. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +0 -28
  251. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +0 -802
  252. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/binary-ops.h +0 -16
  253. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
  254. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
  255. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
  256. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
  257. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ops.h +0 -128
  258. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sgemm.h +0 -14
  259. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/simd-mappings.h +0 -888
  260. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unary-ops.h +0 -28
  261. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/binary-ops.h +0 -16
  262. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +0 -8
  263. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +0 -512
  264. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +0 -63
  265. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +0 -38
  266. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ops.h +0 -128
  267. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +0 -14
  268. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/simd-mappings.h +0 -888
  269. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +0 -28
  270. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +0 -802
  271. package/lib/commonjs/chat.js +0 -37
  272. package/lib/commonjs/chat.js.map +0 -1
  273. package/lib/module/chat.js +0 -33
  274. package/lib/module/chat.js.map +0 -1
  275. package/lib/typescript/chat.d.ts +0 -10
  276. package/lib/typescript/chat.d.ts.map +0 -1
  277. package/src/chat.ts +0 -44
  278. /package/cpp/{binary-ops.cpp → ggml-cpu/binary-ops.cpp} +0 -0
  279. /package/cpp/{ggml-cpu-aarch64.h → ggml-cpu/ggml-cpu-aarch64.h} +0 -0
  280. /package/cpp/{ggml-cpu-impl.h → ggml-cpu/ggml-cpu-impl.h} +0 -0
  281. /package/cpp/{ggml-cpu-quants.h → ggml-cpu/ggml-cpu-quants.h} +0 -0
  282. /package/cpp/{ggml-cpu-traits.cpp → ggml-cpu/ggml-cpu-traits.cpp} +0 -0
  283. /package/cpp/{ggml-cpu-traits.h → ggml-cpu/ggml-cpu-traits.h} +0 -0
  284. /package/cpp/{sgemm.h → ggml-cpu/sgemm.h} +0 -0
  285. /package/cpp/{unary-ops.cpp → ggml-cpu/unary-ops.cpp} +0 -0
package/cpp/llama-kv-cache.h
@@ -2,32 +2,75 @@
 
 #include "llama.h"
 #include "llama-io.h"
+#include "llama-graph.h"
 #include "llama-memory.h"
 
 #include "ggml-cpp.h"
 
-#include <functional>
 #include <set>
+#include <unordered_map>
 #include <vector>
 
 struct llama_cparams;
 struct llama_hparams;
 struct llama_ubatch;
+struct llama_sbatch;
+struct llama_model;
+struct llama_context;
 
 struct llama_kv_cache : public llama_memory_i {
-    using llama_memory_i::llama_memory_i;
+    virtual ~llama_kv_cache() = default;
 
-    virtual void restore() = 0; // call if batch processing fails - restores the cache state
-    virtual void commit() = 0; // call after successful batch processing - clears any pending state
+    // call if batch processing fails - restores the cache state
+    virtual void restore() = 0;
 
-    virtual int32_t get_n_tokens() const = 0;
-    virtual int32_t get_used_cells() const = 0; // TODO: remove, this is too-specific to the unified cache
+    // call after successful batch processing - clears any pending state
+    virtual void commit() = 0;
 
+    // process any pending defrag/shift/etc. operations
+    // optionally call once before processing a new batch
+    virtual bool update(llama_context & lctx) = 0;
+
+    // schedule a defrag if the fragmentation threshold is exceeded. otherwise, do nothing
+    virtual void defrag_sched(float thold) = 0;
+
+    // simulate full cache, used for allocating worst-case compute buffers
+    virtual void set_full() = 0;
+
+    //
+    // batch processing
+    //
+
+    // =============================================================================================================
+    // TODO: refactor and simplify this
+
+    virtual llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) = 0;
+
+    // different KV caches require different batch splitting strategies
+    virtual llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const = 0;
+
+    // find an empty slot of size "n_tokens" in the cache
+    virtual bool find_slot(const llama_ubatch & batch) = 0;
+
+    // =============================================================================================================
+
+    // getters
     virtual bool get_can_shift() const = 0;
 
     bool get_can_edit() const override { return get_can_shift(); }
+
+    //
+    // state write/read
+    //
+
+    virtual void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const = 0;
+    virtual void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) = 0;
 };
 
+//
+// llama_kv_cache_guard
+//
+
 struct llama_kv_cache_guard {
     llama_kv_cache_guard(llama_kv_cache * kv) : kv(kv) {}
 
@@ -43,171 +86,430 @@ private:
     llama_kv_cache * kv;
 };
 
-struct llama_kv_cell {
-    llama_pos pos = -1;
-    llama_pos delta = 0;
-    int32_t src = -1; // used by recurrent state models to copy states
-    int32_t tail = -1;
+//
+// llama_kv_cache_unified
+//
 
-    std::set<llama_seq_id> seq_id;
+class llama_kv_cache_unified : public llama_kv_cache {
+public:
+    static uint32_t get_padding(const llama_cparams & cparams);
 
-    bool has_seq_id(const llama_seq_id & id) const {
-        return seq_id.find(id) != seq_id.end();
-    }
+    // this callback is used to filter out layers that should not be included in the cache
+    using layer_filter_cb = std::function<bool(int32_t il)>;
 
-    bool is_empty() const {
-        return seq_id.empty();
-    }
+    llama_kv_cache_unified(
+            const llama_model & model,
+            layer_filter_cb && filter,
+            lm_ggml_type type_k,
+            lm_ggml_type type_v,
+            bool v_trans,
+            bool offload,
+            uint32_t kv_size,
+            uint32_t n_seq_max,
+            uint32_t n_pad,
+            uint32_t n_swa,
+            llama_swa_type swa_type);
+
+    ~llama_kv_cache_unified() = default;
+
+    //
+    // llama_memory_i
+    //
 
-    bool is_same_seq(const llama_kv_cell & other) const {
-        return seq_id == other.seq_id;
-    }
-};
+    void clear() override;
 
-// ring-buffer of cached KV data
-// TODO: pimpl
-// TODO: add notion of max sequences
-class llama_kv_cache_unified : public llama_kv_cache {
-public:
-    // can be used to query data from the model if needed
-    struct callbacks {
-        std::function<lm_ggml_tensor * (uint32_t n_ctx_per_seq, int il)> get_rope_factors;
+    bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    //
+    // llama_kv_cache
+    //
+
+    void restore() override;
+    void commit() override;
+
+    bool update(llama_context & ctx) override;
+
+    void defrag_sched(float thold) override;
+
+    void set_full() override;
+
+    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
+
+    // updates the cache head
+    // Note: On success, it's important that cache.head points
+    // to the first cell of the slot.
+    bool find_slot(const llama_ubatch & batch) override;
+
+    bool get_can_shift() const override;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+
+    //
+    // llama_kv_cache_unified specific API
+    //
+
+    uint32_t get_n() const;
+    uint32_t get_size() const;
+
+    // get views of the current state of the cache
+    lm_ggml_tensor * get_k(lm_ggml_context * ctx, int32_t il) const;
+    lm_ggml_tensor * get_v(lm_ggml_context * ctx, int32_t il) const;
+
+    // store k_cur and v_cur in the cache based on the current head location
+    lm_ggml_tensor * cpy_k(lm_ggml_context * ctx, lm_ggml_tensor * k_cur, int32_t il) const;
+    lm_ggml_tensor * cpy_v(lm_ggml_context * ctx, lm_ggml_tensor * v_cur, int32_t il) const;
+
+    void prune_swa(llama_seq_id seq_id, llama_pos pmin, llama_pos pmax);
+
+    void set_input_kq_mask (lm_ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
+    void set_input_k_shift (lm_ggml_tensor * dst) const;
+    void set_input_pos_bucket(lm_ggml_tensor * dst, const llama_ubatch * ubatch) const;
+
+private:
+    const llama_model & model;
+    const llama_hparams & hparams;
+
+    struct kv_cell {
+        llama_pos pos = -1;
+        llama_pos delta = 0;
+
+        // TODO: replace with bitset uint64_t
+        std::set<llama_seq_id> seq_id;
+
+        bool has_seq_id(const llama_seq_id & id) const {
+            return seq_id.find(id) != seq_id.end();
+        }
+
+        bool is_empty() const {
+            return seq_id.empty();
+        }
+
+        bool is_same_seq(const kv_cell & other) const {
+            return seq_id == other.seq_id;
+        }
     };
 
-    llama_kv_cache_unified(
-            const llama_hparams & hparams,
-            callbacks cbs);
+    struct kv_layer {
+        // layer index in the model
+        // note: can be different from the layer index in the KV cache
+        uint32_t il;
+
+        lm_ggml_tensor * k;
+        lm_ggml_tensor * v;
+    };
+
+    bool has_shift = false;
+    bool do_defrag = false;
+    bool v_trans = true; // the value tensor is transposed
+
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+    uint32_t size = 0; // total number of cells, shared across all sequences
+    uint32_t used = 0; // used cells (i.e. at least one seq_id) (TODO: add `struct kv_cells` and keep track automaticallt)
+
+    // computed before each graph build
+    uint32_t n = 0;
+
+    const uint32_t n_seq_max = 1;
+
+    // required padding
+    const uint32_t n_pad = 1;
+
+    // SWA
+    const uint32_t n_swa = 0;
+
+    const llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;
+
+    std::vector<lm_ggml_context_ptr> ctxs;
+    std::vector<lm_ggml_backend_buffer_ptr> bufs;
+
+    std::vector<kv_cell> cells; // TODO: replace with `struct kv_cells`
+    std::vector<kv_layer> layers;
 
-    virtual ~llama_kv_cache_unified() = default;
+    // model layer id -> KV cache layer id
+    std::unordered_map<int32_t, int32_t> map_layer_ids;
+
+    // recovery information used to restore the KV cells to their original state in case of a failure
+    struct {
+        void clear() {
+            cells.clear();
+        }
+
+        std::unordered_map<uint32_t, kv_cell> cells;
+    } recovery;
+
+    // defrag
+    struct {
+        std::vector<uint32_t> ids;
+    } defrag_info;
+
+    // return true if cells have been moved
+    bool defrag_prepare(int32_t n_max_nodes);
+
+    // find how many cells are currently in use
+    uint32_t cell_max() const;
+
+    size_t total_size() const;
+
+    size_t size_k_bytes() const;
+    size_t size_v_bytes() const;
+
+    bool is_masked_swa(llama_pos p0, llama_pos p1) const;
+
+    lm_ggml_tensor * build_rope_shift(
+            const llama_cparams & cparams,
+            lm_ggml_context * ctx,
+            lm_ggml_tensor * cur,
+            lm_ggml_tensor * shift,
+            lm_ggml_tensor * factors,
+            float freq_base,
+            float freq_scale) const;
+
+    llm_graph_result_ptr build_graph_shift(
+            const llama_cparams & cparams,
+            lm_ggml_context * ctx,
+            lm_ggml_cgraph * gf) const;
+
+    llm_graph_result_ptr build_graph_defrag(
+            const llama_cparams & cparams,
+            lm_ggml_context * ctx,
+            lm_ggml_cgraph * gf) const;
+
+    void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
+    void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
 
-    // TODO: become constructor
-    bool init(
-            const llama_model & model, // TODO: do not reference the model
-            const llama_cparams & cparams,
+    bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
+    bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
+};
+
+//
+// llama_kv_cache_unified_iswa
+//
+
+// utilizes two instances of llama_kv_cache_unified
+// the first instance is for the non-SWA layers of the model and the second instance is for the SWA layers
+// upon successful commit, the SWA cache removes old tokens outside the n_swa window
+
+class llama_kv_cache_unified_iswa : public llama_kv_cache {
+public:
+    llama_kv_cache_unified_iswa(
+            const llama_model & model,
            lm_ggml_type type_k,
            lm_ggml_type type_v,
+            bool v_trans,
+            bool offload,
+            bool swa_full,
            uint32_t kv_size,
-            bool offload);
+            uint32_t n_seq_max,
+            uint32_t n_batch,
+            uint32_t n_pad);
 
-    int32_t get_n_tokens() const override;
-    int32_t get_used_cells() const override;
-
-    size_t total_size() const;
+    ~llama_kv_cache_unified_iswa() = default;
 
-    // TODO: better data structures to reduce the cost of this operation
-    llama_pos pos_max() const;
+    //
+    // llama_memory_i
+    //
 
     void clear() override;
-    void defrag() override;
-
-    virtual void restore() override;
-    virtual void commit() override;
 
     bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
     void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
-    void seq_keep(llama_seq_id seq_id) override;
+    void seq_keep(llama_seq_id seq_id) override;
     void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
     void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
 
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
     llama_pos seq_pos_max(llama_seq_id seq_id) const override;
 
+    //
+    // llama_kv_cache
+    //
+
+    void restore() override;
+    void commit() override;
+
+    bool update(llama_context & ctx) override;
+
+    void defrag_sched(float thold) override;
+
+    void set_full() override;
+
+    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
+
+    bool find_slot(const llama_ubatch & batch) override;
+
     bool get_can_shift() const override;
 
-    // find an empty slot of size "n_tokens" in the cache
-    // updates the cache head
-    // Note: On success, it's important that cache.head points
-    // to the first cell of the slot.
-    bool find_slot(const llama_ubatch & batch);
+    // state write/load
 
-    // TODO: maybe not needed
-    uint32_t get_padding(const llama_cparams & cparams) const;
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
 
-    // find how many cells are currently in use
-    uint32_t cell_max() const;
+    //
+    // llama_kv_cache_unified_iswa specific API
+    //
 
-    size_t size_k_bytes() const;
-    size_t size_v_bytes() const;
+    llama_kv_cache_unified * get_kv_base() const;
+    llama_kv_cache_unified * get_kv_swa () const;
 
-    // defrag
+private:
+    const llama_hparams & hparams;
+
+    bool do_prune = true;
 
     struct {
-        std::vector<uint32_t> ids;
-    } defrag_info;
+        struct entry {
+            llama_pos pmin;
+            llama_pos pmax;
+        };
 
-    // return true if cells have been moved
-    bool defrag_prepare(int32_t n_max_nodes);
+        void clear() {
+            pos.clear();
+        }
 
-    // commit/restore cache
+        // used to perform SWA pruning of old tokens
+        std::unordered_map<llama_seq_id, entry> pos;
+    } pending;
 
-    struct slot_range {
-        uint32_t c0 = 0; // note: these are cell indices, not sequence positions
-        uint32_t c1 = 0;
+    std::unique_ptr<llama_kv_cache_unified> kv_base;
+    std::unique_ptr<llama_kv_cache_unified> kv_swa;
+};
+
+//
+// llama_kv_cache_recurrent
+//
+
+class llama_kv_cache_recurrent : public llama_kv_cache {
+public:
+    struct kv_cell {
+        llama_pos pos = -1;
+        int32_t src = -1; // used to copy states
+        int32_t tail = -1;
+
+        std::set<llama_seq_id> seq_id;
+
+        bool has_seq_id(const llama_seq_id & id) const {
+            return seq_id.find(id) != seq_id.end();
+        }
+
+        bool is_empty() const {
+            return seq_id.empty();
+        }
+
+        bool is_same_seq(const kv_cell & other) const {
+            return seq_id == other.seq_id;
+        }
     };
 
-    // pending cell updates that are not yet committed
-    struct {
-        std::vector<slot_range> ranges;
-    } pending;
+    llama_kv_cache_recurrent(
+            const llama_model & model,
+            lm_ggml_type type_k,
+            lm_ggml_type type_v,
+            bool offload,
+            uint32_t kv_size,
+            uint32_t n_seq_max);
 
-    // state write/load
+    ~llama_kv_cache_recurrent() = default;
 
-    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const;
-    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1);
+    //
+    // llama_memory_i
+    //
 
-    // members
+    void clear() override;
 
-    const llama_hparams & hparams;
+    bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
 
-    callbacks cbs;
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
 
-    bool has_shift = false;
-    bool do_defrag = false;
+    //
+    // llama_kv_cache
+    //
 
-    // TODO: remove this and implement llama_kv_cache_recurrent instead
-    bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token
+    void restore() override;
+    void commit() override;
 
-    bool v_trans = true; // the value tensor is transposed
-    bool can_shift = false;
+    bool update(llama_context & ctx) override;
+
+    void defrag_sched(float thold) override;
+
+    void set_full() override;
 
-    // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_impl also uses it, so it
-    // cannot be freely changed after a slot has been allocated.
-    uint32_t head = 0;
-    uint32_t size = 0;
+    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
+
+    bool find_slot(const llama_ubatch & batch) override;
+
+    bool get_can_shift() const override;
+
+    // TODO: temporary methods - they are not really const as they do const_cast<>, fix this
+    int32_t s_copy(int i) const;
+    float s_mask(int i) const;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+    uint32_t size = 0; // total number of cells, shared across all sequences
    uint32_t used = 0; // used cells (i.e. at least one seq_id)
 
     // computed before each graph build
     uint32_t n = 0;
 
-    std::vector<llama_kv_cell> cells;
+    std::vector<kv_cell> cells;
 
     std::vector<lm_ggml_tensor *> k_l; // per layer
     std::vector<lm_ggml_tensor *> v_l;
 
 private:
-    lm_ggml_type type_k = LM_GGML_TYPE_F16;
-    lm_ggml_type type_v = LM_GGML_TYPE_F16;
+    //const llama_model & model;
+    const llama_hparams & hparams;
+
+    // commit/restore cache
+    // TODO: rework for recurrent cache
+    struct slot_range {
+        uint32_t c0 = 0; // note: these are cell indices, not sequence positions
+        uint32_t c1 = 0;
+    };
+
+    // pending cell updates that are not yet committed
+    struct {
+        std::vector<slot_range> ranges;
+    } pending;
+
+    const uint32_t n_seq_max = 1;
 
     std::vector<lm_ggml_context_ptr> ctxs;
     std::vector<lm_ggml_backend_buffer_ptr> bufs;
 
+    // find how many cells are currently in use
+    uint32_t cell_max() const;
+
+    size_t total_size() const;
+
+    size_t size_k_bytes() const;
+    size_t size_v_bytes() const;
+
     void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
     void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
 
     bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
     bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
 };
-
-// TODO: temporary reusing llama_kv_cache_unified -- implement recurrent cache and simplify llama_kv_cache_unified
-//class llama_kv_cache_recurrent : public llama_kv_cache_unified {
-//public:
-//    using llama_kv_cache_unified::llama_kv_cache_unified;
-//};
-
-//
-// kv cache view
-//
-
-llama_kv_cache_view llama_kv_cache_view_init(const llama_kv_cache & kv, int32_t n_seq_max);
-
-void llama_kv_cache_view_update(llama_kv_cache_view * view, const llama_kv_cache * kv);
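The comments in the new llama_kv_cache interface above describe a two-phase contract around batch processing: find_slot() reserves room for an ubatch, commit() finalizes a successful batch, and restore() rolls the cells back after a failure. A minimal caller-side sketch of that contract follows; it is not taken from this package, and process_ubatch() is a hypothetical placeholder for the actual decode step:

    // minimal sketch, assuming `kv` is any llama_kv_cache implementation;
    // process_ubatch() is a hypothetical application-level decode step
    static bool decode_with_recovery(llama_kv_cache & kv, const llama_ubatch & ubatch) {
        if (!kv.find_slot(ubatch)) {
            return false;               // no free slot of the required size
        }

        if (!process_ubatch(ubatch)) {  // hypothetical: run the actual decode
            kv.restore();               // failure: roll the KV cells back
            return false;
        }

        kv.commit();                    // success: drop the pending recovery state
        return true;
    }

The llama_kv_cache_guard struct above exists to make this pairing harder to get wrong when the processing path can exit early.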
package/cpp/llama-memory.h
@@ -2,12 +2,22 @@
 
 #include "llama.h"
 
+struct llama_memory_params {
+    // kv cache
+    lm_ggml_type type_k;
+    lm_ggml_type type_v;
+
+    // use full-size SWA cache
+    bool swa_full;
+};
+
 // general concept of LLM memory
 // the KV cache is a type of LLM memory, but there can be other types
 class llama_memory_i {
 public:
+    virtual ~llama_memory_i() = default;
+
     virtual void clear() = 0;
-    virtual void defrag() = 0;
 
     virtual bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0;
     virtual void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0;
@@ -15,6 +25,7 @@ public:
     virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) = 0;
     virtual void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) = 0;
 
+    virtual llama_pos seq_pos_min(llama_seq_id seq_id) const = 0;
     virtual llama_pos seq_pos_max(llama_seq_id seq_id) const = 0;
 
     virtual bool get_can_edit() const = 0;
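For reference, a minimal sketch of how the new llama_memory_params might be populated before constructing a cache. The field meanings come from the comments in the diff above; LM_GGML_TYPE_F16 matches the defaults the old llama-kv-cache.h used, and the surrounding call site is assumed:

    // minimal sketch: fill the new memory parameters before cache creation
    llama_memory_params mparams;
    mparams.type_k   = LM_GGML_TYPE_F16; // element type of the K tensors
    mparams.type_v   = LM_GGML_TYPE_F16; // element type of the V tensors
    mparams.swa_full = false;            // keep the reduced-size SWA cache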