cui-llama.rn 1.7.4 → 1.7.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (276)
  1. package/README.md +217 -17
  2. package/android/src/main/CMakeLists.txt +34 -15
  3. package/android/src/main/java/com/rnllama/LlamaContext.java +79 -5
  4. package/android/src/main/java/com/rnllama/RNLlama.java +237 -0
  5. package/android/src/main/jni.cpp +213 -14
  6. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  9. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  10. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  11. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  12. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  13. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  14. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +35 -0
  15. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +34 -0
  16. package/cpp/README.md +1 -1
  17. package/cpp/chat-parser.cpp +385 -0
  18. package/cpp/chat-parser.h +120 -0
  19. package/cpp/chat.cpp +726 -596
  20. package/cpp/chat.h +71 -6
  21. package/cpp/common.cpp +56 -38
  22. package/cpp/common.h +9 -3
  23. package/cpp/ggml-backend-reg.cpp +5 -0
  24. package/cpp/ggml-backend.cpp +10 -2
  25. package/cpp/ggml-common.h +4 -0
  26. package/cpp/ggml-cpu/amx/amx.cpp +1 -1
  27. package/cpp/ggml-cpu/amx/mmq.cpp +11 -10
  28. package/cpp/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
  29. package/cpp/ggml-cpu/arch/arm/quants.c +4114 -0
  30. package/cpp/ggml-cpu/arch/arm/repack.cpp +2163 -0
  31. package/cpp/ggml-cpu/arch/x86/cpu-feats.cpp +327 -0
  32. package/cpp/ggml-cpu/arch/x86/quants.c +4311 -0
  33. package/cpp/ggml-cpu/{ggml-cpu-aarch64.cpp → arch/x86/repack.cpp} +79 -3225
  34. package/cpp/ggml-cpu/arch-fallback.h +184 -0
  35. package/cpp/ggml-cpu/common.h +4 -3
  36. package/cpp/ggml-cpu/ggml-cpu-impl.h +21 -16
  37. package/cpp/ggml-cpu/ggml-cpu.c +123 -104
  38. package/cpp/ggml-cpu/ggml-cpu.cpp +11 -8
  39. package/cpp/ggml-cpu/ops.cpp +330 -148
  40. package/cpp/ggml-cpu/ops.h +1 -0
  41. package/cpp/ggml-cpu/quants.c +1158 -0
  42. package/cpp/ggml-cpu/{ggml-cpu-quants.h → quants.h} +26 -0
  43. package/cpp/ggml-cpu/repack.cpp +1571 -0
  44. package/cpp/ggml-cpu/repack.h +98 -0
  45. package/cpp/ggml-cpu/simd-mappings.h +330 -38
  46. package/cpp/ggml-cpu/{ggml-cpu-traits.cpp → traits.cpp} +1 -1
  47. package/cpp/ggml-cpu/vec.cpp +87 -18
  48. package/cpp/ggml-cpu/vec.h +249 -94
  49. package/cpp/ggml-cpu.h +1 -0
  50. package/cpp/ggml-impl.h +63 -183
  51. package/cpp/ggml-llama-sim.metallib +0 -0
  52. package/cpp/ggml-llama.metallib +0 -0
  53. package/cpp/ggml-metal.m +152 -45
  54. package/cpp/ggml-quants.c +0 -2
  55. package/cpp/ggml.c +61 -21
  56. package/cpp/ggml.h +22 -3
  57. package/cpp/gguf.cpp +24 -3
  58. package/cpp/json-partial.cpp +256 -0
  59. package/cpp/json-partial.h +38 -0
  60. package/cpp/json-schema-to-grammar.cpp +5 -47
  61. package/cpp/json-schema-to-grammar.h +4 -4
  62. package/cpp/llama-arch.cpp +153 -3
  63. package/cpp/llama-arch.h +27 -1
  64. package/cpp/llama-batch.cpp +741 -272
  65. package/cpp/llama-batch.h +112 -54
  66. package/cpp/llama-chat.cpp +30 -8
  67. package/cpp/llama-chat.h +1 -0
  68. package/cpp/llama-context.cpp +524 -339
  69. package/cpp/llama-context.h +38 -17
  70. package/cpp/llama-cparams.cpp +4 -0
  71. package/cpp/llama-cparams.h +2 -0
  72. package/cpp/llama-grammar.cpp +12 -2
  73. package/cpp/llama-graph.cpp +431 -356
  74. package/cpp/llama-graph.h +126 -58
  75. package/cpp/llama-hparams.cpp +10 -2
  76. package/cpp/llama-hparams.h +19 -2
  77. package/cpp/llama-kv-cache-unified-iswa.cpp +279 -0
  78. package/cpp/llama-kv-cache-unified-iswa.h +128 -0
  79. package/cpp/llama-kv-cache-unified.cpp +1841 -0
  80. package/cpp/llama-kv-cache-unified.h +303 -0
  81. package/cpp/llama-kv-cells.h +439 -0
  82. package/cpp/llama-memory-hybrid.cpp +246 -0
  83. package/cpp/llama-memory-hybrid.h +138 -0
  84. package/cpp/llama-memory-recurrent.cpp +1112 -0
  85. package/cpp/llama-memory-recurrent.h +183 -0
  86. package/cpp/llama-memory.cpp +41 -0
  87. package/cpp/llama-memory.h +86 -5
  88. package/cpp/llama-mmap.cpp +1 -1
  89. package/cpp/llama-model-loader.cpp +42 -17
  90. package/cpp/llama-model-saver.cpp +1 -0
  91. package/cpp/llama-model.cpp +1639 -513
  92. package/cpp/llama-model.h +26 -0
  93. package/cpp/llama-sampling.cpp +2 -2
  94. package/cpp/llama-vocab.cpp +65 -28
  95. package/cpp/llama-vocab.h +1 -0
  96. package/cpp/llama.cpp +11 -7
  97. package/cpp/llama.h +150 -42
  98. package/cpp/minja/chat-template.hpp +1 -1
  99. package/cpp/minja/minja.hpp +1 -1
  100. package/cpp/{json.hpp → nlohmann/json.hpp} +3027 -2267
  101. package/cpp/nlohmann/json_fwd.hpp +187 -0
  102. package/cpp/regex-partial.cpp +204 -0
  103. package/cpp/regex-partial.h +56 -0
  104. package/cpp/rn-llama.cpp +646 -35
  105. package/cpp/rn-llama.h +32 -1
  106. package/cpp/rn-tts.h +39 -0
  107. package/cpp/sampling.cpp +7 -8
  108. package/cpp/tools/mtmd/clip-impl.h +5 -0
  109. package/cpp/tools/mtmd/clip.cpp +572 -436
  110. package/cpp/tools/mtmd/clip.h +14 -4
  111. package/cpp/tools/mtmd/mtmd-audio.cpp +0 -86
  112. package/cpp/tools/mtmd/mtmd-audio.h +2 -17
  113. package/cpp/tools/mtmd/mtmd-helper.cpp +175 -12
  114. package/cpp/tools/mtmd/mtmd-helper.h +91 -0
  115. package/cpp/tools/mtmd/mtmd.cpp +368 -248
  116. package/cpp/tools/mtmd/mtmd.h +6 -70
  117. package/cpp/unicode.cpp +5 -0
  118. package/ios/CMakeLists.txt +26 -6
  119. package/ios/RNLlama.h +1 -1
  120. package/ios/RNLlama.mm +153 -3
  121. package/ios/RNLlamaContext.h +9 -1
  122. package/ios/RNLlamaContext.mm +112 -9
  123. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat-parser.h +120 -0
  124. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +71 -6
  125. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +9 -3
  126. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-common.h +4 -0
  127. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu.h +1 -0
  128. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-impl.h +63 -183
  129. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +22 -3
  130. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json-partial.h +38 -0
  131. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +4 -4
  132. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-arch.h +27 -1
  133. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-batch.h +112 -54
  134. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
  135. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +38 -17
  136. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +2 -0
  137. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +126 -58
  138. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +19 -2
  139. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache-unified-iswa.h +128 -0
  140. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache-unified.h +303 -0
  141. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cells.h +439 -0
  142. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory-hybrid.h +138 -0
  143. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory-recurrent.h +183 -0
  144. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +86 -5
  145. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +26 -0
  146. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +1 -0
  147. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +150 -42
  148. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +1 -1
  149. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +1 -1
  150. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/{json.hpp → nlohmann/json.hpp} +3027 -2267
  151. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/nlohmann/json_fwd.hpp +187 -0
  152. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/regex-partial.h +56 -0
  153. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +32 -1
  154. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-tts.h +39 -0
  155. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  156. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
  157. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat-parser.h +120 -0
  158. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +71 -6
  159. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +9 -3
  160. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +4 -0
  161. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +1 -0
  162. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +63 -183
  163. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +22 -3
  164. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json-partial.h +38 -0
  165. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +4 -4
  166. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +27 -1
  167. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +112 -54
  168. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
  169. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +38 -17
  170. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +2 -0
  171. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +126 -58
  172. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +19 -2
  173. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache-unified-iswa.h +128 -0
  174. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache-unified.h +303 -0
  175. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cells.h +439 -0
  176. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory-hybrid.h +138 -0
  177. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory-recurrent.h +183 -0
  178. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +86 -5
  179. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +26 -0
  180. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +1 -0
  181. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +150 -42
  182. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +1 -1
  183. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +1 -1
  184. package/ios/rnllama.xcframework/{tvos-arm64/rnllama.framework/Headers → ios-arm64_x86_64-simulator/rnllama.framework/Headers/nlohmann}/json.hpp +3027 -2267
  185. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/nlohmann/json_fwd.hpp +187 -0
  186. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/regex-partial.h +56 -0
  187. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +32 -1
  188. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-tts.h +39 -0
  189. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  190. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  191. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat-parser.h +120 -0
  192. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +71 -6
  193. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +9 -3
  194. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-common.h +4 -0
  195. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu.h +1 -0
  196. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-impl.h +63 -183
  197. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +22 -3
  198. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json-partial.h +38 -0
  199. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +4 -4
  200. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-arch.h +27 -1
  201. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-batch.h +112 -54
  202. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
  203. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +38 -17
  204. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +2 -0
  205. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +126 -58
  206. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +19 -2
  207. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache-unified-iswa.h +128 -0
  208. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache-unified.h +303 -0
  209. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cells.h +439 -0
  210. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory-hybrid.h +138 -0
  211. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory-recurrent.h +183 -0
  212. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +86 -5
  213. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +26 -0
  214. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +1 -0
  215. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +150 -42
  216. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +1 -1
  217. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +1 -1
  218. package/ios/rnllama.xcframework/{ios-arm64_x86_64-simulator/rnllama.framework/Headers → tvos-arm64/rnllama.framework/Headers/nlohmann}/json.hpp +3027 -2267
  219. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/nlohmann/json_fwd.hpp +187 -0
  220. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/regex-partial.h +56 -0
  221. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +32 -1
  222. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-tts.h +39 -0
  223. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  224. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
  225. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat-parser.h +120 -0
  226. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +71 -6
  227. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +9 -3
  228. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +4 -0
  229. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +1 -0
  230. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +63 -183
  231. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +22 -3
  232. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json-partial.h +38 -0
  233. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +4 -4
  234. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +27 -1
  235. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +112 -54
  236. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
  237. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +38 -17
  238. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +2 -0
  239. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +126 -58
  240. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +19 -2
  241. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache-unified-iswa.h +128 -0
  242. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache-unified.h +303 -0
  243. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cells.h +439 -0
  244. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory-hybrid.h +138 -0
  245. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory-recurrent.h +183 -0
  246. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +86 -5
  247. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +26 -0
  248. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +1 -0
  249. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +150 -42
  250. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +1 -1
  251. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +1 -1
  252. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/nlohmann/json.hpp +25526 -0
  253. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/nlohmann/json_fwd.hpp +187 -0
  254. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/regex-partial.h +56 -0
  255. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +32 -1
  256. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-tts.h +39 -0
  257. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  258. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  259. package/jest/mock.js +24 -0
  260. package/package.json +1 -1
  261. package/src/NativeRNLlama.ts +46 -2
  262. package/src/index.ts +105 -1
  263. package/cpp/ggml-cpu/ggml-cpu-aarch64.h +0 -8
  264. package/cpp/ggml-cpu/ggml-cpu-quants.c +0 -13326
  265. package/cpp/ggml-cpu/sgemm.cpp +0 -3544
  266. package/cpp/ggml-cpu/sgemm.h +0 -14
  267. package/cpp/llama-kv-cache.cpp +0 -2827
  268. package/cpp/llama-kv-cache.h +0 -515
  269. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +0 -515
  270. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +0 -515
  271. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +0 -515
  272. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +0 -24766
  273. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +0 -515
  274. /package/cpp/ggml-cpu/{ggml-cpu-traits.h → traits.h} +0 -0
  275. /package/cpp/tools/mtmd/{miniaudio.h → miniaudio/miniaudio.h} +0 -0
  276. /package/cpp/tools/mtmd/{stb_image.h → stb/stb_image.h} +0 -0
package/cpp/llama-graph.h

@@ -17,10 +17,12 @@ struct lm_ggml_tensor;
 struct llama_ubatch;
 struct llama_cparams;
 
-class llama_memory_i;
-class llama_kv_cache_unified;
-class llama_kv_cache_unified_iswa;
-class llama_kv_cache_recurrent;
+struct llama_memory_context_i;
+
+class llama_kv_cache_unified_context;
+class llama_kv_cache_unified_iswa_context;
+class llama_memory_recurrent_context;
+class llama_memory_hybrid_context;
 
 // certain models (typically multi-modal) can produce different types of graphs
 enum llm_graph_type {
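
Across the llama-graph.h hunks that follow, the recurring change is that graph-input classes stop holding a pointer to the long-lived cache object (`kv_self`) and instead hold a short-lived, per-batch memory context (`mctx`). As a rough illustration of that ownership pattern, here is a minimal self-contained C++ sketch; all type names below are hypothetical stand-ins, not the package's real definitions:

```cpp
#include <memory>

// stand-in for llama_memory_context_i: a short-lived, per-ubatch view of a
// memory module, consumed by graph-building code
struct memory_context_i {
    virtual ~memory_context_i() = default;
    virtual bool apply() = 0; // bind this ubatch's slice of the memory
};

// stand-in for llama_kv_cache_unified: the long-lived module that hands out
// one context per batch
struct kv_cache {
    struct context : memory_context_i {
        explicit context(kv_cache * c) : cache(c) {}
        bool apply() override { return cache != nullptr; }
        kv_cache * cache;
    };

    std::unique_ptr<memory_context_i> init_batch() {
        return std::make_unique<context>(this);
    }
};

// stand-in for an llm_graph_input_* class: it only ever sees the context
struct graph_input {
    const memory_context_i * mctx = nullptr; // was: const kv_cache * kv_self
};
```
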
@@ -35,6 +37,7 @@ enum llm_ffn_op_type {
     LLM_FFN_RELU,
     LLM_FFN_RELU_SQR,
     LLM_FFN_SWIGLU,
+    LLM_FFN_GEGLU,
 };
 
 enum llm_ffn_gate_type {
@@ -92,14 +95,14 @@ public:
 
 class llm_graph_input_pos : public llm_graph_input_i {
 public:
-    llm_graph_input_pos(int64_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {}
+    llm_graph_input_pos(uint32_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {}
     virtual ~llm_graph_input_pos() = default;
 
     void set_input(const llama_ubatch * ubatch) override;
 
     lm_ggml_tensor * pos = nullptr; // I32 [n_batch]
 
-    const int64_t n_pos_per_embd = 1;
+    const uint32_t n_pos_per_embd = 1;
 };
 
 // temperature tuning, used by llama4
@@ -133,7 +136,7 @@ class llm_graph_input_pos_bucket_kv : public llm_graph_input_i {
 public:
     llm_graph_input_pos_bucket_kv(
             const llama_hparams & hparams,
-            const llama_kv_cache_unified * kv_self) : hparams(hparams), kv_self(kv_self) {}
+            const llama_kv_cache_unified_context * mctx) : hparams(hparams), mctx(mctx) {}
     virtual ~llm_graph_input_pos_bucket_kv() = default;
 
     void set_input(const llama_ubatch * ubatch) override;
@@ -141,7 +144,8 @@ public:
     lm_ggml_tensor * pos_bucket = nullptr; // I32 [n_kv, n_batch]
 
     const llama_hparams & hparams;
-    const llama_kv_cache_unified * kv_self;
+
+    const llama_kv_cache_unified_context * mctx;
 };
 
 class llm_graph_input_out_ids : public llm_graph_input_i {
@@ -186,28 +190,16 @@ public:
     const llama_cparams & cparams;
 };
 
-class llm_graph_input_s_copy : public llm_graph_input_i {
+class llm_graph_input_rs : public llm_graph_input_i {
 public:
-    llm_graph_input_s_copy(const llama_kv_cache_recurrent * kv_self) : kv_self(kv_self) {}
-    virtual ~llm_graph_input_s_copy() = default;
+    llm_graph_input_rs(const llama_memory_recurrent_context * mctx) : mctx(mctx) {}
+    virtual ~llm_graph_input_rs() = default;
 
     void set_input(const llama_ubatch * ubatch) override;
 
     lm_ggml_tensor * s_copy; // I32 [kv_size]
 
-    const llama_kv_cache_recurrent * kv_self;
-};
-
-class llm_graph_input_s_mask : public llm_graph_input_i {
-public:
-    llm_graph_input_s_mask(const llama_kv_cache_recurrent * kv_self) : kv_self(kv_self) {}
-    virtual ~llm_graph_input_s_mask() = default;
-
-    void set_input(const llama_ubatch * ubatch) override;
-
-    lm_ggml_tensor * s_mask; // F32 [1, n_kv]
-
-    const llama_kv_cache_recurrent * kv_self;
+    const llama_memory_recurrent_context * mctx;
 };
 
 class llm_graph_input_cross_embd : public llm_graph_input_i {
@@ -247,10 +239,10 @@ public:
     llm_graph_input_attn_kv_unified(
             const llama_hparams & hparams,
             const llama_cparams & cparams,
-            const llama_kv_cache_unified * kv_self) :
+            const llama_kv_cache_unified_context * mctx) :
         hparams(hparams),
         cparams(cparams),
-        kv_self(kv_self) {
+        mctx(mctx) {
     }
     ~llm_graph_input_attn_kv_unified() = default;
 
@@ -264,7 +256,7 @@ public:
     const llama_hparams & hparams;
     const llama_cparams & cparams;
 
-    const llama_kv_cache_unified * kv_self;
+    const llama_kv_cache_unified_context * mctx;
 };
 
 class llm_graph_input_attn_kv_unified_iswa : public llm_graph_input_i {
@@ -272,10 +264,10 @@ public:
     llm_graph_input_attn_kv_unified_iswa(
             const llama_hparams & hparams,
             const llama_cparams & cparams,
-            const llama_kv_cache_unified_iswa * kv_self) :
+            const llama_kv_cache_unified_iswa_context * mctx) :
         hparams(hparams),
         cparams(cparams),
-        kv_self(kv_self) {
+        mctx(mctx) {
     }
     ~llm_graph_input_attn_kv_unified_iswa() = default;
 
@@ -292,7 +284,7 @@ public:
     const llama_hparams & hparams;
     const llama_cparams & cparams;
 
-    const llama_kv_cache_unified_iswa * kv_self;
+    const llama_kv_cache_unified_iswa_context * mctx;
 };
 
 class llm_graph_input_attn_cross : public llm_graph_input_i {
@@ -310,6 +302,44 @@ public:
     const llama_cross * cross = nullptr;
 };
 
+class llm_graph_input_mem_hybrid : public llm_graph_input_i {
+public:
+    llm_graph_input_mem_hybrid(
+            const llama_hparams & hparams,
+            const llama_cparams & cparams,
+            const llama_memory_hybrid_context * mctx) :
+        hparams(hparams),
+        cparams(cparams),
+        mctx(mctx) {
+    }
+    virtual ~llm_graph_input_mem_hybrid() = default;
+
+    void set_input(const llama_ubatch * ubatch) override;
+
+    lm_ggml_tensor * s_copy; // I32 [kv_size]
+
+    lm_ggml_tensor * get_kq_mask() const { return self_kq_mask_cnv; }
+
+    lm_ggml_tensor * self_kq_mask     = nullptr; // F32 [n_kv, n_batch]
+    lm_ggml_tensor * self_kq_mask_cnv = nullptr; //     [n_kv, n_batch]
+
+    const llama_hparams & hparams;
+    const llama_cparams & cparams;
+
+    const llama_memory_hybrid_context * mctx;
+};
+
+// TODO: remove this when lm_ggml_scale_add is implemented
+class llm_graph_input_one : public llm_graph_input_i {
+public:
+    llm_graph_input_one() {}
+    virtual ~llm_graph_input_one() = default;
+
+    void set_input(const llama_ubatch *) override;
+
+    lm_ggml_tensor * one = nullptr; // F32
+};
+
 //
 // llm_graph_result
 //
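
The new `llm_graph_input_mem_hybrid` bundles the attention mask (`self_kq_mask`) together with the recurrent-state copy tensor (`s_copy`), so a single input object can feed both halves of a hybrid attention/SSM model; the matching `build_attn` and `build_rs` overloads that accept it are declared further down in this header. A hedged sketch of the per-layer dispatch this appears designed for (the loop shape and variable names are assumptions; only the function names come from this diff):

```cpp
// hypothetical caller, shown as a comment sketch because it cannot stand alone:
//
//   llm_graph_input_mem_hybrid * inp = build_inp_mem_hybrid();
//
//   for (int il = 0; il < n_layer; ++il) {
//       if (hparams.is_recurrent(il)) {
//           // SSM/recurrent layer: roll the recurrent state forward
//           cur = build_rs(inp, gf, state, state_size, n_seqs);
//       } else {
//           // attention layer: regular masked KV attention
//           cur = build_attn(inp, gf, wo, wo_b, q_cur, k_cur, v_cur,
//                            /*kq_b=*/nullptr, /*v_mla=*/nullptr, kq_scale, il);
//       }
//   }
```
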
@@ -383,12 +413,12 @@ struct llm_graph_params {
     lm_ggml_backend_sched_t sched;
     lm_ggml_backend_t backend_cpu;
 
-    const llama_adapter_cvec  * cvec;
-    const llama_adapter_loras * loras;
-    const llama_memory_i      * memory;
-    const llama_cross         * cross;
+    const llama_adapter_cvec     * cvec;
+    const llama_adapter_loras    * loras;
+    const llama_memory_context_i * mctx;
+    const llama_cross            * cross;
 
-    int32_t n_outputs;
+    uint32_t n_outputs;
 
     const llm_graph_cb & cb;
 };
@@ -422,8 +452,8 @@ struct llm_graph_context {
     const float norm_eps;
     const float norm_rms_eps;
 
-    const int32_t n_tokens;
-    const int32_t n_outputs;
+    const int64_t n_tokens;
+    const int64_t n_outputs;
     const int32_t n_ctx_orig; // yarn
 
     const enum llama_pooling_type pooling_type;
@@ -435,10 +465,10 @@ struct llm_graph_context {
 
     lm_ggml_backend_t backend_cpu; // TODO: needed by build_attn_mha, figure out a way to remove?
 
-    const llama_adapter_cvec  * cvec;
-    const llama_adapter_loras * loras;
-    const llama_memory_i      * memory;
-    const llama_cross         * cross;
+    const llama_adapter_cvec     * cvec;
+    const llama_adapter_loras    * loras;
+    const llama_memory_context_i * mctx;
+    const llama_cross            * cross;
 
     const llm_graph_cb & cb_func;
 
@@ -446,8 +476,6 @@ struct llm_graph_context {
 
     llm_graph_context(const llm_graph_params & params);
 
-    int64_t n_pos_per_embd() const;
-
     void cb(lm_ggml_tensor * cur, const char * name, int il) const;
 
     //
@@ -518,14 +546,14 @@ struct llm_graph_context {
     lm_ggml_tensor * build_inp_out_ids() const;
     lm_ggml_tensor * build_inp_mean() const;
    lm_ggml_tensor * build_inp_cls() const;
-    lm_ggml_tensor * build_inp_s_copy() const;
-    lm_ggml_tensor * build_inp_s_mask() const;
 
     lm_ggml_tensor * build_inp_cross_embd() const;
     lm_ggml_tensor * build_inp_pos_bucket_enc() const;
     lm_ggml_tensor * build_inp_pos_bucket_dec() const;
     lm_ggml_tensor * build_pos_bias(lm_ggml_tensor * pos_bucket, lm_ggml_tensor * attn_rel_b) const;
 
+    llm_graph_input_mem_hybrid * build_inp_mem_hybrid() const;
+
     //
     // attention
     //
@@ -572,14 +600,15 @@ struct llm_graph_context {
 
     llm_graph_input_attn_kv_unified_iswa * build_attn_inp_kv_unified_iswa() const;
 
+    // note: if k_cur or v_cur are not provided, they will not be stored in the memory
     lm_ggml_tensor * build_attn(
             llm_graph_input_attn_kv_unified_iswa * inp,
             lm_ggml_cgraph * gf,
             lm_ggml_tensor * wo,
             lm_ggml_tensor * wo_b,
             lm_ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
-            lm_ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
-            lm_ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
+            lm_ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens] optional
+            lm_ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens] optional
             lm_ggml_tensor * kq_b,
             lm_ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v]
             float kq_scale,
@@ -600,23 +629,62 @@ struct llm_graph_context {
             float kq_scale,
             int il) const;
 
+    lm_ggml_tensor * build_attn(
+            llm_graph_input_mem_hybrid * inp,
+            lm_ggml_cgraph * gf,
+            lm_ggml_tensor * wo,
+            lm_ggml_tensor * wo_b,
+            lm_ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
+            lm_ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
+            lm_ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
+            lm_ggml_tensor * kq_b,
+            lm_ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v]
+            float kq_scale,
+            int il) const;
     //
     // recurrent
     //
 
-    lm_ggml_tensor * build_copy_mask_state(
-            lm_ggml_cgraph * gf,
-            lm_ggml_tensor * s,
-            lm_ggml_tensor * state_copy,
-            lm_ggml_tensor * state_mask,
-            int32_t n_state,
-            int32_t n_seqs) const;
+    // TODO: avoid notion of "kv"
+    // TODO: move this implementation to llama_memory_recurrent.
+    //       this is analogous to llama_kv_cache_unified::cpy_k / cpy_v
+    //       when moving, avoid passing `lm_ggml_cgraph` - only pass `lm_ggml_context`. would likely need to split the
+    //       implementation in 2 separate methods. the goal is to avoid calling `lm_ggml_build_forward_expand` in
+    //       `llama_memory_recurrent`
+    lm_ggml_tensor * build_rs(
+            lm_ggml_cgraph * gf,
+            lm_ggml_tensor * s,
+            lm_ggml_tensor * state_copy,
+            int32_t state_size,
+            int32_t n_seqs,
+            uint32_t n_kv,
+            uint32_t kv_head,
+            uint32_t kv_size,
+            int32_t rs_zero,
+            bool avoid_copies = false) const;
+
+    llm_graph_input_rs * build_rs_inp() const;
+
+    lm_ggml_tensor * build_rs(
+            llm_graph_input_rs * inp,
+            lm_ggml_cgraph * gf,
+            lm_ggml_tensor * s,
+            int32_t state_size,
+            int32_t n_seqs,
+            bool avoid_copies = false) const;
+
+    lm_ggml_tensor * build_rs(
+            llm_graph_input_mem_hybrid * inp,
+            lm_ggml_cgraph * gf,
+            lm_ggml_tensor * s,
+            int32_t state_size,
+            int32_t n_seqs,
+            bool avoid_copies = false) const;
 
     lm_ggml_tensor * build_rwkv_token_shift_load(
-            lm_ggml_cgraph * gf,
-            lm_ggml_tensor * state_copy,
-            lm_ggml_tensor * state_mask,
-            const llama_ubatch & ubatch,
+            llm_graph_input_rs * inp,
+            lm_ggml_cgraph * gf,
+            const llama_ubatch & ubatch,
             int il) const;
 
     lm_ggml_tensor * build_rwkv_token_shift_store(
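
For callers of the recurrent-state helpers, the old two-input flow (`build_inp_s_copy`, `build_inp_s_mask`, `build_copy_mask_state`) collapses into a single `llm_graph_input_rs` produced by `build_rs_inp()`, and the separate mask input disappears. A before/after sketch based only on the signatures in this header; tensor names such as `token_shift` are illustrative:

```cpp
// before (1.7.4):
//   lm_ggml_tensor * state_copy = build_inp_s_copy();
//   lm_ggml_tensor * state_mask = build_inp_s_mask();
//   lm_ggml_tensor * cur = build_copy_mask_state(gf, token_shift,
//           state_copy, state_mask, n_state, n_seqs);
//
// after (1.7.6):
//   llm_graph_input_rs * inp = build_rs_inp();
//   lm_ggml_tensor * cur = build_rs(inp, gf, token_shift, state_size, n_seqs);
//
// build_rwkv_token_shift_load() changes the same way: it now takes the
// llm_graph_input_rs directly instead of separate state_copy/state_mask tensors.
```
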
package/cpp/llama-hparams.h

@@ -115,6 +115,9 @@ struct llama_hparams {
     uint32_t ssm_d_state = 0;
     uint32_t ssm_dt_rank = 0;
 
+    // for hybrid state space models
+    std::array<bool, LLAMA_MAX_LAYERS> recurrent_layer_arr;
+
     bool ssm_dt_b_c_rms = false;
 
     float f_clamp_kqv = 0.0f;
@@ -131,12 +134,21 @@ struct llama_hparams {
     bool attn_soft_cap = false;
     bool use_kq_norm = true;
 
+    // for Classifiers
+    uint32_t n_cls_out = 1;
+
     // llama4
     uint32_t n_moe_layer_step = 0;
     uint32_t n_no_rope_layer_step = 4;
     uint32_t n_attn_temp_floor_scale = 8192;
     float    f_attn_temp_scale = 0.1;
 
+    // gemma3n altup
+    uint32_t n_altup      = 4;   // altup_num_inputs
+    uint32_t i_altup_act  = 0;   // altup_active_idx
+    uint32_t laurel_rank  = 64;
+    uint32_t n_embd_altup = 256;
+
     // needed by encoder-decoder models (e.g. T5, FLAN-T5)
     // ref: https://github.com/ggerganov/llama.cpp/pull/8141
     llama_token dec_start_token_id = LLAMA_TOKEN_NULL;
@@ -178,10 +190,15 @@
 
     // dimension of the rolling state embeddings
     // corresponds to Mamba's conv_states size or RWKV's token_shift states size
-    uint32_t n_embd_k_s() const;
+    uint32_t n_embd_r() const;
 
     // dimension of the recurrent state embeddings
-    uint32_t n_embd_v_s() const;
+    uint32_t n_embd_s() const;
+
+    // whether or not the given layer is recurrent (for hybrid models)
+    bool is_recurrent(uint32_t il) const;
+
+    uint32_t n_pos_per_embd() const;
 
     bool is_swa(uint32_t il) const;
 };
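
The new `recurrent_layer_arr` / `is_recurrent()` pair marks which layers of a hybrid model carry recurrent (SSM-style) state rather than KV-cache state. A self-contained mock of the assumed semantics; `hparams_mock` and the 3:1 layer pattern are invented for illustration:

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

constexpr std::size_t MAX_LAYERS = 512; // stand-in for LLAMA_MAX_LAYERS

struct hparams_mock {
    std::array<bool, MAX_LAYERS> recurrent_layer_arr{};
    bool is_recurrent(uint32_t il) const { return recurrent_layer_arr[il]; }
};

int main() {
    hparams_mock hp;
    // e.g. a hybrid stack where every 4th layer is attention, the rest SSM
    for (uint32_t il = 0; il < 12; ++il) {
        hp.recurrent_layer_arr[il] = (il % 4 != 3);
    }
    for (uint32_t il = 0; il < 12; ++il) {
        std::printf("layer %2u -> %s\n", il,
                    hp.is_recurrent(il) ? "recurrent state" : "KV attention");
    }
    return 0;
}
```
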
package/cpp/llama-kv-cache-unified-iswa.h (new file)

@@ -0,0 +1,128 @@
+#pragma once
+
+#include "llama-kv-cache-unified.h"
+
+#include <vector>
+
+//
+// llama_kv_cache_unified_iswa
+//
+
+// utilizes two instances of llama_kv_cache_unified
+// the first instance is for the non-SWA layers of the model and the second instance is for the SWA layers
+
+class llama_kv_cache_unified_iswa : public llama_memory_i {
+public:
+    llama_kv_cache_unified_iswa(
+            const llama_model & model,
+            lm_ggml_type type_k,
+            lm_ggml_type type_v,
+            bool v_trans,
+            bool offload,
+            bool swa_full,
+            uint32_t kv_size,
+            uint32_t n_seq_max,
+            uint32_t n_ubatch,
+            uint32_t n_pad);
+
+    ~llama_kv_cache_unified_iswa() = default;
+
+    //
+    // llama_memory_i
+    //
+
+    llama_memory_context_ptr init_batch(
+            llama_batch_allocr & balloc,
+            uint32_t n_ubatch,
+            bool embd_all) override;
+
+    llama_memory_context_ptr init_full() override;
+
+    llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override;
+
+    bool get_can_shift() const override;
+
+    void clear(bool data) override;
+
+    bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1) override;
+
+    //
+    // llama_kv_cache_unified_iswa specific API
+    //
+
+    llama_kv_cache_unified * get_base() const;
+    llama_kv_cache_unified * get_swa () const;
+
+private:
+    const llama_hparams & hparams;
+
+    std::unique_ptr<llama_kv_cache_unified> kv_base;
+    std::unique_ptr<llama_kv_cache_unified> kv_swa;
+};
+
+class llama_kv_cache_unified_iswa_context : public llama_memory_context_i {
+public:
+    // used for errors
+    llama_kv_cache_unified_iswa_context(llama_memory_status status);
+
+    // used to create a full-cache context
+    llama_kv_cache_unified_iswa_context(
+            llama_kv_cache_unified_iswa * kv);
+
+    // used to create an update context
+    llama_kv_cache_unified_iswa_context(
+            llama_kv_cache_unified_iswa * kv,
+            llama_context * lctx,
+            bool optimize);
+
+    // used to create a batch processing context from a batch
+    llama_kv_cache_unified_iswa_context(
+            llama_kv_cache_unified_iswa * kv,
+            std::vector<uint32_t> heads_base,
+            std::vector<uint32_t> heads_swa,
+            std::vector<llama_ubatch> ubatches);
+
+    virtual ~llama_kv_cache_unified_iswa_context();
+
+    //
+    // llama_memory_context_i
+    //
+
+    bool next() override;
+    bool apply() override;
+
+    llama_memory_status get_status() const override;
+    const llama_ubatch & get_ubatch() const override;
+
+    //
+    // llama_kv_cache_unified_iswa_context specific API
+    //
+
+    const llama_kv_cache_unified_context * get_base() const;
+    const llama_kv_cache_unified_context * get_swa() const;
+
+private:
+    //llama_kv_cache_unified_iswa * kv;
+
+    // the index of the next ubatch to process
+    size_t i_next = 0;
+
+    std::vector<llama_ubatch> ubatches;
+
+    const llama_memory_context_ptr ctx_base;
+    const llama_memory_context_ptr ctx_swa;
+
+    const llama_memory_status status;
+};
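
The header above spells out the iSWA design: one `llama_kv_cache_unified` instance for the full-attention layers and a second one for the sliding-window layers, with the context class exposing one sub-context per cache. A compilable mock of just that two-instance layout; every type here is a stand-in, not the real class:

```cpp
#include <memory>

// stand-in for llama_kv_cache_unified
struct kv_cache_mock {
    explicit kv_cache_mock(unsigned n) : n_cells(n) {}
    unsigned n_cells;
};

// mirrors the kv_base / kv_swa split of llama_kv_cache_unified_iswa
class iswa_cache_mock {
public:
    iswa_cache_mock(unsigned full_cells, unsigned swa_cells)
        : kv_base(std::make_unique<kv_cache_mock>(full_cells)),
          kv_swa (std::make_unique<kv_cache_mock>(swa_cells)) {}

    kv_cache_mock * get_base() const { return kv_base.get(); } // non-SWA layers
    kv_cache_mock * get_swa () const { return kv_swa.get();  } // SWA layers

private:
    std::unique_ptr<kv_cache_mock> kv_base;
    std::unique_ptr<kv_cache_mock> kv_swa;
};

int main() {
    // e.g. a deep cache for full-attention layers, a window-sized one for SWA
    iswa_cache_mock kv(8192, 1024);
    return kv.get_base()->n_cells > kv.get_swa()->n_cells ? 0 : 1;
}
```
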