cui-llama.rn 1.5.0 → 1.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (324)
  1. package/LICENSE +20 -20
  2. package/README.md +345 -319
  3. package/android/build.gradle +116 -116
  4. package/android/gradle.properties +5 -5
  5. package/android/src/main/AndroidManifest.xml +4 -4
  6. package/android/src/main/CMakeLists.txt +129 -124
  7. package/android/src/main/java/com/rnllama/LlamaContext.java +648 -645
  8. package/android/src/main/java/com/rnllama/RNLlama.java +695 -695
  9. package/android/src/main/java/com/rnllama/RNLlamaPackage.java +48 -48
  10. package/android/src/main/jni-utils.h +100 -100
  11. package/android/src/main/jni.cpp +1279 -1263
  12. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  13. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  14. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  15. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  16. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  17. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  18. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  19. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  20. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +135 -135
  21. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +136 -136
  22. package/cpp/LICENSE +21 -0
  23. package/cpp/README.md +4 -4
  24. package/cpp/chat.cpp +1 -1
  25. package/cpp/common.cpp +17 -2
  26. package/cpp/common.h +7 -3
  27. package/cpp/ggml-alloc.c +4 -1
  28. package/cpp/ggml-cpp.h +1 -1
  29. package/cpp/ggml-cpu/amx/amx.cpp +221 -0
  30. package/cpp/ggml-cpu/amx/amx.h +8 -0
  31. package/cpp/ggml-cpu/amx/common.h +91 -0
  32. package/cpp/ggml-cpu/amx/mmq.cpp +2511 -0
  33. package/cpp/ggml-cpu/amx/mmq.h +10 -0
  34. package/cpp/{binary-ops.h → ggml-cpu/binary-ops.h} +1 -1
  35. package/cpp/ggml-cpu/common.h +72 -0
  36. package/cpp/{ggml-cpu-aarch64.cpp → ggml-cpu/ggml-cpu-aarch64.cpp} +809 -101
  37. package/cpp/{ggml-cpu.c → ggml-cpu/ggml-cpu.c} +109 -42
  38. package/cpp/{ggml-cpu.cpp → ggml-cpu/ggml-cpu.cpp} +3 -0
  39. package/cpp/{ops.cpp → ggml-cpu/ops.cpp} +246 -160
  40. package/cpp/{ops.h → ggml-cpu/ops.h} +2 -20
  41. package/cpp/{sgemm.cpp → ggml-cpu/sgemm.cpp} +501 -0
  42. package/cpp/{simd-mappings.h → ggml-cpu/simd-mappings.h} +7 -3
  43. package/cpp/{unary-ops.h → ggml-cpu/unary-ops.h} +1 -1
  44. package/cpp/ggml-cpu.h +5 -0
  45. package/cpp/ggml-impl.h +16 -9
  46. package/cpp/ggml-llama-sim.metallib +0 -0
  47. package/cpp/ggml-llama.metallib +0 -0
  48. package/cpp/ggml-metal-impl.h +597 -597
  49. package/cpp/ggml-metal.m +496 -47
  50. package/cpp/ggml.c +134 -244
  51. package/cpp/ggml.h +62 -95
  52. package/cpp/json-schema-to-grammar.cpp +3 -0
  53. package/cpp/llama-arch.cpp +46 -17
  54. package/cpp/llama-arch.h +9 -0
  55. package/cpp/llama-batch.cpp +5 -1
  56. package/cpp/llama-batch.h +2 -1
  57. package/cpp/llama-chat.cpp +31 -10
  58. package/cpp/llama-chat.h +3 -2
  59. package/cpp/llama-context.cpp +104 -489
  60. package/cpp/llama-context.h +14 -30
  61. package/cpp/llama-graph.cpp +69 -62
  62. package/cpp/llama-graph.h +21 -18
  63. package/cpp/llama-hparams.h +5 -0
  64. package/cpp/llama-kv-cache.cpp +1497 -391
  65. package/cpp/llama-kv-cache.h +272 -80
  66. package/cpp/llama-memory.h +11 -1
  67. package/cpp/llama-model.cpp +502 -176
  68. package/cpp/llama-model.h +13 -3
  69. package/cpp/llama-sampling.cpp +2 -1
  70. package/cpp/llama-vocab.cpp +8 -1
  71. package/cpp/llama.h +14 -11
  72. package/cpp/rn-llama.cpp +721 -873
  73. package/cpp/rn-llama.h +134 -138
  74. package/cpp/sampling.h +107 -107
  75. package/cpp/unicode-data.cpp +7034 -7034
  76. package/cpp/unicode-data.h +20 -20
  77. package/cpp/unicode.cpp +849 -849
  78. package/cpp/unicode.h +66 -66
  79. package/ios/CMakeLists.txt +119 -108
  80. package/ios/RNLlama.h +13 -7
  81. package/ios/RNLlama.mm +423 -405
  82. package/ios/RNLlamaContext.h +57 -57
  83. package/ios/RNLlamaContext.mm +833 -835
  84. package/ios/rnllama.xcframework/Info.plist +74 -74
  85. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +143 -0
  86. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +681 -0
  87. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/cpu-common.h +72 -0
  88. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-alloc.h +76 -0
  89. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  90. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +354 -0
  91. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-common.h +1857 -0
  92. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpp.h +39 -0
  93. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu.h +143 -0
  94. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-impl.h +601 -0
  95. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  96. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal.h +66 -0
  97. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +216 -0
  98. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-quants.h +100 -0
  99. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-threading.h +14 -0
  100. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +2189 -0
  101. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/gguf.h +202 -0
  102. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  103. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json.hpp +24766 -0
  104. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-adapter.h +76 -0
  105. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-arch.h +437 -0
  106. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-batch.h +89 -0
  107. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +57 -0
  108. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +249 -0
  109. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +38 -0
  110. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cpp.h +30 -0
  111. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-grammar.h +173 -0
  112. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +595 -0
  113. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +161 -0
  114. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-impl.h +61 -0
  115. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-io.h +35 -0
  116. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +405 -0
  117. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +31 -0
  118. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-mmap.h +68 -0
  119. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-loader.h +169 -0
  120. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +419 -0
  121. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-sampling.h +32 -0
  122. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +125 -0
  123. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +1437 -0
  124. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/log.h +132 -0
  125. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  126. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  127. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +134 -0
  128. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sampling.h +107 -0
  129. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/speculative.h +28 -0
  130. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unicode-data.h +20 -0
  131. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unicode.h +66 -0
  132. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
  133. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  134. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
  135. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +143 -0
  136. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +681 -0
  137. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/cpu-common.h +72 -0
  138. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-alloc.h +76 -0
  139. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  140. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +354 -0
  141. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +1857 -0
  142. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +39 -0
  143. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +143 -0
  144. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +601 -0
  145. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  146. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal.h +66 -0
  147. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +216 -0
  148. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-quants.h +100 -0
  149. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-threading.h +14 -0
  150. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +2189 -0
  151. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/gguf.h +202 -0
  152. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  153. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +24766 -0
  154. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-adapter.h +76 -0
  155. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +437 -0
  156. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +89 -0
  157. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +57 -0
  158. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +249 -0
  159. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +38 -0
  160. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cpp.h +30 -0
  161. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-grammar.h +173 -0
  162. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +595 -0
  163. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +161 -0
  164. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-impl.h +61 -0
  165. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-io.h +35 -0
  166. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +405 -0
  167. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +31 -0
  168. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-mmap.h +68 -0
  169. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-loader.h +169 -0
  170. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +419 -0
  171. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-sampling.h +32 -0
  172. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +125 -0
  173. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +1437 -0
  174. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/log.h +132 -0
  175. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  176. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  177. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +134 -0
  178. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sampling.h +107 -0
  179. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/speculative.h +28 -0
  180. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unicode-data.h +20 -0
  181. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unicode.h +66 -0
  182. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  183. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +101 -0
  184. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  185. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  186. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +143 -0
  187. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +681 -0
  188. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/cpu-common.h +72 -0
  189. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-alloc.h +76 -0
  190. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  191. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +354 -0
  192. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-common.h +1857 -0
  193. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpp.h +39 -0
  194. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu.h +143 -0
  195. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-impl.h +601 -0
  196. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  197. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal.h +66 -0
  198. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +216 -0
  199. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-quants.h +100 -0
  200. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-threading.h +14 -0
  201. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +2189 -0
  202. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/gguf.h +202 -0
  203. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  204. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json.hpp +24766 -0
  205. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-adapter.h +76 -0
  206. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-arch.h +437 -0
  207. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-batch.h +89 -0
  208. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +57 -0
  209. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +249 -0
  210. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +38 -0
  211. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cpp.h +30 -0
  212. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-grammar.h +173 -0
  213. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +595 -0
  214. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +161 -0
  215. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-impl.h +61 -0
  216. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-io.h +35 -0
  217. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +405 -0
  218. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +31 -0
  219. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-mmap.h +68 -0
  220. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-loader.h +169 -0
  221. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +419 -0
  222. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-sampling.h +32 -0
  223. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +125 -0
  224. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +1437 -0
  225. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/log.h +132 -0
  226. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  227. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  228. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +134 -0
  229. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sampling.h +107 -0
  230. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/speculative.h +28 -0
  231. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unicode-data.h +20 -0
  232. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unicode.h +66 -0
  233. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
  234. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  235. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
  236. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +143 -0
  237. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +681 -0
  238. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/cpu-common.h +72 -0
  239. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-alloc.h +76 -0
  240. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  241. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +354 -0
  242. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +1857 -0
  243. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +39 -0
  244. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +143 -0
  245. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +601 -0
  246. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  247. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal.h +66 -0
  248. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +216 -0
  249. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-quants.h +100 -0
  250. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-threading.h +14 -0
  251. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +2189 -0
  252. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/gguf.h +202 -0
  253. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  254. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +24766 -0
  255. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-adapter.h +76 -0
  256. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +437 -0
  257. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +89 -0
  258. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +57 -0
  259. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +249 -0
  260. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +38 -0
  261. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cpp.h +30 -0
  262. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-grammar.h +173 -0
  263. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +595 -0
  264. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +161 -0
  265. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-impl.h +61 -0
  266. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-io.h +35 -0
  267. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +405 -0
  268. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +31 -0
  269. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-mmap.h +68 -0
  270. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-loader.h +169 -0
  271. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +419 -0
  272. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-sampling.h +32 -0
  273. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +125 -0
  274. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +1437 -0
  275. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/log.h +132 -0
  276. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  277. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  278. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +134 -0
  279. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sampling.h +107 -0
  280. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/speculative.h +28 -0
  281. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unicode-data.h +20 -0
  282. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unicode.h +66 -0
  283. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  284. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +101 -0
  285. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  286. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  287. package/jest/mock.js +203 -203
  288. package/lib/commonjs/NativeRNLlama.js +1 -2
  289. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  290. package/lib/commonjs/chat.js.map +1 -1
  291. package/lib/commonjs/grammar.js +12 -31
  292. package/lib/commonjs/grammar.js.map +1 -1
  293. package/lib/commonjs/index.js +47 -47
  294. package/lib/commonjs/index.js.map +1 -1
  295. package/lib/commonjs/package.json +1 -0
  296. package/lib/module/NativeRNLlama.js +2 -0
  297. package/lib/module/NativeRNLlama.js.map +1 -1
  298. package/lib/module/chat.js +2 -0
  299. package/lib/module/chat.js.map +1 -1
  300. package/lib/module/grammar.js +14 -31
  301. package/lib/module/grammar.js.map +1 -1
  302. package/lib/module/index.js +47 -45
  303. package/lib/module/index.js.map +1 -1
  304. package/lib/module/package.json +1 -0
  305. package/lib/typescript/NativeRNLlama.d.ts +10 -4
  306. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  307. package/lib/typescript/index.d.ts.map +1 -1
  308. package/llama-rn.podspec +48 -48
  309. package/package.json +233 -233
  310. package/src/NativeRNLlama.ts +431 -426
  311. package/src/chat.ts +44 -44
  312. package/src/grammar.ts +854 -854
  313. package/src/index.ts +495 -487
  314. /package/cpp/{binary-ops.cpp → ggml-cpu/binary-ops.cpp} +0 -0
  315. /package/cpp/{ggml-cpu-aarch64.h → ggml-cpu/ggml-cpu-aarch64.h} +0 -0
  316. /package/cpp/{ggml-cpu-impl.h → ggml-cpu/ggml-cpu-impl.h} +0 -0
  317. /package/cpp/{ggml-cpu-quants.c → ggml-cpu/ggml-cpu-quants.c} +0 -0
  318. /package/cpp/{ggml-cpu-quants.h → ggml-cpu/ggml-cpu-quants.h} +0 -0
  319. /package/cpp/{ggml-cpu-traits.cpp → ggml-cpu/ggml-cpu-traits.cpp} +0 -0
  320. /package/cpp/{ggml-cpu-traits.h → ggml-cpu/ggml-cpu-traits.h} +0 -0
  321. /package/cpp/{sgemm.h → ggml-cpu/sgemm.h} +0 -0
  322. /package/cpp/{unary-ops.cpp → ggml-cpu/unary-ops.cpp} +0 -0
  323. /package/cpp/{vec.cpp → ggml-cpu/vec.cpp} +0 -0
  324. /package/cpp/{vec.h → ggml-cpu/vec.h} +0 -0
@@ -0,0 +1,161 @@
+ #pragma once
+
+ #include "llama.h"
+
+ #include <array>
+
+ // bump if necessary
+ #define LLAMA_MAX_LAYERS 512
+ #define LLAMA_MAX_EXPERTS 256 // DeepSeekV3
+
+ enum llama_expert_gating_func_type {
+     LLAMA_EXPERT_GATING_FUNC_TYPE_NONE = 0,
+     LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX = 1,
+     LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID = 2,
+ };
+
+ struct llama_hparams_posnet {
+     uint32_t n_embd;
+     uint32_t n_layer;
+ };
+
+ struct llama_hparams_convnext {
+     uint32_t n_embd;
+     uint32_t n_layer;
+ };
+
+ struct llama_hparams {
+     bool vocab_only;
+     bool rope_finetuned;
+     bool use_par_res;
+     bool swin_norm;
+
+     uint32_t n_ctx_train; // context size the model was trained on
+     uint32_t n_embd;
+     uint32_t n_embd_features = 0;
+     uint32_t n_layer;
+     uint32_t n_rot;
+     uint32_t n_swa = 0; // sliding window attention (SWA)
+     uint32_t n_swa_pattern = 1; // by default, all layers use non-sliding-window attention
+     uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
+     uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
+     uint32_t n_expert = 0;
+     uint32_t n_expert_used = 0;
+     uint32_t n_rel_attn_bkts = 0;
+
+     // note: deepseek2 using MLA converts into MQA with larger heads, then decompresses to MHA
+     uint32_t n_embd_head_k_mla = 0;
+     uint32_t n_embd_head_v_mla = 0;
+
+     // for WavTokenizer
+     struct llama_hparams_posnet posnet;
+     struct llama_hparams_convnext convnext;
+
+     std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_arr;
+     std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_kv_arr;
+     std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr;
+
+     uint32_t n_layer_dense_lead = 0;
+     uint32_t n_lora_q = 0;
+     uint32_t n_lora_kv = 0;
+     uint32_t n_ff_exp = 0;
+     uint32_t n_ff_shexp = 0;
+     uint32_t n_expert_shared = 0;
+     uint32_t n_norm_groups = 0;
+
+     float expert_weights_scale = 0.0;
+     bool expert_weights_norm = false;
+     uint32_t expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_NONE;
+     uint32_t moe_every_n_layers = 0;
+
+     float f_norm_eps;
+     float f_norm_rms_eps;
+     float f_norm_group_eps;
+
+     float f_attn_logit_softcapping = 50.0f;
+     float f_final_logit_softcapping = 30.0f;
+
+     // for RWKV
+     uint32_t rescale_every_n_layers = 0;
+     uint32_t time_mix_extra_dim = 0;
+     uint32_t time_decay_extra_dim = 0;
+     uint32_t wkv_head_size = 0;
+     uint32_t token_shift_count = 2;
+     uint32_t n_lora_decay = 0;
+     uint32_t n_lora_iclr = 0;
+     uint32_t n_lora_value_res_mix = 0;
+     uint32_t n_lora_gate = 0;
+
+     float rope_attn_factor = 1.0f;
+     float rope_freq_base_train;
+     float rope_freq_base_train_swa;
+     float rope_freq_scale_train;
+     float rope_freq_scale_train_swa;
+     uint32_t n_ctx_orig_yarn;
+     float rope_yarn_log_mul;
+
+     std::array<int, 4> rope_sections;
+
+     // for State Space Models
+     uint32_t ssm_d_conv = 0;
+     uint32_t ssm_d_inner = 0;
+     uint32_t ssm_d_state = 0;
+     uint32_t ssm_dt_rank = 0;
+
+     bool ssm_dt_b_c_rms = false;
+
+     float f_clamp_kqv = 0.0f;
+     float f_max_alibi_bias = 0.0f;
+     float f_logit_scale = 0.0f;
+
+     // Additional scale factors (Granite/Granite MoE)
+     float f_residual_scale = 0.0f;
+     float f_embedding_scale = 0.0f;
+     float f_attention_scale = 0.0f;
+
+     bool causal_attn = true;
+     bool use_alibi = false;
+     bool attn_soft_cap = false;
+
+     uint32_t n_moe_layer_step = 0;
+     bool use_kq_norm = true;
+     uint32_t n_attn_chunk = 0;
+     // values below seems to be fixed on llama4
+     uint32_t n_no_rope_layer_step = 4;
+     uint32_t n_attn_temp_floor_scale = 8192;
+     float f_attn_temp_scale = 0.1;
+
+     // needed by encoder-decoder models (e.g. T5, FLAN-T5)
+     // ref: https://github.com/ggerganov/llama.cpp/pull/8141
+     llama_token dec_start_token_id = LLAMA_TOKEN_NULL;
+
+     enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_NONE;
+     enum llama_rope_type rope_type = LLAMA_ROPE_TYPE_NONE;
+     enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;
+
+     uint32_t n_head(uint32_t il = 0) const;
+
+     uint32_t n_head_kv(uint32_t il = 0) const;
+
+     uint32_t n_ff(uint32_t il = 0) const;
+
+     uint32_t n_gqa(uint32_t il = 0) const;
+
+     // dimension of key embeddings across all k-v heads
+     uint32_t n_embd_k_gqa(uint32_t il = 0) const;
+
+     // dimension of value embeddings across all k-v heads
+     uint32_t n_embd_v_gqa(uint32_t il = 0) const;
+
+     // dimension of the rolling state embeddings
+     // corresponds to Mamba's conv_states size or RWKV's token_shift states size
+     uint32_t n_embd_k_s() const;
+
+     // dimension of the recurrent state embeddings
+     uint32_t n_embd_v_s() const;
+
+     bool is_swa(uint32_t il) const;
+ };
+
+ static_assert(std::is_trivially_copyable<llama_hparams>::value, "llama_hparams must be trivially copyable");
+
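Note: the per-layer std::array fields above (n_head_arr, n_head_kv_arr, n_ff_arr) back the n_head(il)/n_head_kv(il)/n_ff(il) accessors, so heterogeneous models can vary head and FFN sizes by layer. A standalone sketch of that pattern, using simplified stand-in types rather than the package's actual implementation (which lives in llama-hparams.cpp):

#include <array>
#include <cassert>
#include <cstdint>

// Stand-in for the per-layer hparams pattern: one entry per layer,
// indexed by layer id. Names here are illustrative only.
struct hparams_demo {
    static constexpr uint32_t max_layers = 512; // mirrors LLAMA_MAX_LAYERS
    uint32_t n_layer = 0;
    std::array<uint32_t, max_layers> n_head_arr{};
    std::array<uint32_t, max_layers> n_head_kv_arr{};

    uint32_t n_head(uint32_t il) const {
        assert(il < n_layer);        // out-of-range layer id is a bug
        return n_head_arr[il];
    }
    uint32_t n_head_kv(uint32_t il) const {
        assert(il < n_layer);
        return n_head_kv_arr[il];
    }
    // grouped-query ratio: how many query heads share one KV head
    uint32_t n_gqa(uint32_t il) const {
        const uint32_t kv = n_head_kv(il);
        return kv == 0 ? 0 : n_head(il) / kv;
    }
};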
@@ -0,0 +1,61 @@
+ #pragma once
+
+ #include "ggml.h" // for lm_ggml_log_level
+
+ #include <string>
+ #include <vector>
+
+ #ifdef __GNUC__
+ #    if defined(__MINGW32__) && !defined(__clang__)
+ #        define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
+ #    else
+ #        define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
+ #    endif
+ #else
+ #    define LLAMA_ATTRIBUTE_FORMAT(...)
+ #endif
+
+ //
+ // logging
+ //
+
+ LLAMA_ATTRIBUTE_FORMAT(2, 3)
+ void llama_log_internal (lm_ggml_log_level level, const char * format, ...);
+ void llama_log_callback_default(lm_ggml_log_level level, const char * text, void * user_data);
+
+ #define LLAMA_LOG(...) llama_log_internal(LM_GGML_LOG_LEVEL_NONE , __VA_ARGS__)
+ #define LLAMA_LOG_INFO(...) llama_log_internal(LM_GGML_LOG_LEVEL_INFO , __VA_ARGS__)
+ #define LLAMA_LOG_WARN(...) llama_log_internal(LM_GGML_LOG_LEVEL_WARN , __VA_ARGS__)
+ #define LLAMA_LOG_ERROR(...) llama_log_internal(LM_GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
+ #define LLAMA_LOG_DEBUG(...) llama_log_internal(LM_GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
+ #define LLAMA_LOG_CONT(...) llama_log_internal(LM_GGML_LOG_LEVEL_CONT , __VA_ARGS__)
+
+ //
+ // helpers
+ //
+
+ template <typename T>
+ struct no_init {
+     T value;
+     no_init() { /* do nothing */ }
+ };
+
+ struct time_meas {
+     time_meas(int64_t & t_acc, bool disable = false);
+     ~time_meas();
+
+     const int64_t t_start_us;
+
+     int64_t & t_acc;
+ };
+
+ void replace_all(std::string & s, const std::string & search, const std::string & replace);
+
+ // TODO: rename to llama_format ?
+ LLAMA_ATTRIBUTE_FORMAT(1, 2)
+ std::string format(const char * fmt, ...);
+
+ std::string llama_format_tensor_shape(const std::vector<int64_t> & ne);
+ std::string llama_format_tensor_shape(const struct lm_ggml_tensor * t);
+
+ std::string lm_gguf_kv_to_str(const struct lm_gguf_context * ctx_gguf, int i);
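Note: format() above is declared with printf-style argument checking via LLAMA_ATTRIBUTE_FORMAT. A common way to implement such a helper is the two-pass vsnprintf idiom; the sketch below is illustrative (format_demo is a hypothetical name, not the package's own definition):

#include <cstdarg>
#include <cstdio>
#include <string>
#include <vector>

// Two-pass vsnprintf: first call measures the needed length,
// second call writes into a correctly sized buffer.
static std::string format_demo(const char * fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    va_list ap2;
    va_copy(ap2, ap);                             // ap is consumed below
    const int n = vsnprintf(nullptr, 0, fmt, ap); // pass 1: measure
    va_end(ap);
    if (n < 0) {
        va_end(ap2);
        return "";                                // encoding error
    }
    std::vector<char> buf(n + 1);
    vsnprintf(buf.data(), buf.size(), fmt, ap2);  // pass 2: write
    va_end(ap2);
    return std::string(buf.data(), n);
}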
@@ -0,0 +1,35 @@
+ #pragma once
+
+ #include <cstddef>
+ #include <cstdint>
+ #include <string>
+
+ struct lm_ggml_tensor;
+
+ class llama_io_write_i {
+ public:
+     llama_io_write_i() = default;
+     virtual ~llama_io_write_i() = default;
+
+     virtual void write(const void * src, size_t size) = 0;
+     virtual void write_tensor(const lm_ggml_tensor * tensor, size_t offset, size_t size) = 0;
+
+     // bytes written so far
+     virtual size_t n_bytes() = 0;
+
+     void write_string(const std::string & str);
+ };
+
+ class llama_io_read_i {
+ public:
+     llama_io_read_i() = default;
+     virtual ~llama_io_read_i() = default;
+
+     virtual const uint8_t * read(size_t size) = 0;
+     virtual void read_to(void * dst, size_t size) = 0;
+
+     // bytes read so far
+     virtual size_t n_bytes() = 0;
+
+     void read_string(std::string & str);
+ };
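Note: these interfaces decouple state serialization (see the state_write/state_read methods in llama-kv-cache.h below) from the destination. A minimal in-memory sink, sketched against a local stand-in for the interface; the real write_string wire format is defined in llama-io.cpp and may differ from the length-prefixed encoding assumed here:

#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

// Local stand-in for llama_io_write_i (tensor writes omitted).
class io_write_i_demo {
public:
    virtual ~io_write_i_demo() = default;
    virtual void write(const void * src, size_t size) = 0;
    virtual size_t n_bytes() = 0;

    // Assumed encoding: 32-bit length prefix followed by the bytes.
    void write_string(const std::string & str) {
        const uint32_t n = (uint32_t) str.size();
        write(&n, sizeof(n));
        write(str.data(), n);
    }
};

// Sink that appends everything to a growable byte buffer.
class io_write_buffer_demo : public io_write_i_demo {
public:
    void write(const void * src, size_t size) override {
        const uint8_t * p = static_cast<const uint8_t *>(src);
        buf.insert(buf.end(), p, p + size);
    }
    size_t n_bytes() override { return buf.size(); }

    std::vector<uint8_t> buf;
};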
@@ -0,0 +1,405 @@
+ #pragma once
+
+ #include "llama.h"
+ #include "llama-io.h"
+ #include "llama-graph.h"
+ #include "llama-memory.h"
+
+ #include "ggml-cpp.h"
+
+ #include <set>
+ #include <vector>
+
+ struct llama_cparams;
+ struct llama_hparams;
+ struct llama_ubatch;
+ struct llama_sbatch;
+ struct llama_model;
+ struct llama_context;
+
+ struct llama_kv_cache : public llama_memory_i {
+     virtual ~llama_kv_cache() = default;
+
+     // call if batch processing fails - restores the cache state
+     virtual void restore() = 0;
+
+     // call after successful batch processing - clears any pending state
+     virtual void commit() = 0;
+
+     // process any pending defrag/shift/etc. operations
+     // optionally call once before processing a new batch
+     virtual bool update(llama_context & lctx) = 0;
+
+     // schedule a defrag if the fragmentation threshold is exceeded. otherwise, do nothing
+     virtual void defrag_sched(float thold) = 0;
+
+     // simulate full cache, used for allocating worst-case compute buffers
+     virtual void set_full() = 0;
+
+     //
+     // batch processing
+     //
+
+     virtual llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) = 0;
+
+     // different KV caches require different batch splitting strategies
+     virtual llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const = 0;
+
+     // find an empty slot of size "n_tokens" in the cache
+     virtual bool find_slot(const llama_ubatch & batch) = 0;
+
+     // getters
+     virtual int32_t get_n_tokens() const = 0;
+     virtual int32_t get_used_cells() const = 0; // TODO: remove, this is too-specific to the unified cache
+     virtual llama_pos get_pos_max() const = 0;
+     virtual bool get_can_shift() const = 0;
+
+     bool get_can_edit() const override { return get_can_shift(); }
+
+     //
+     // state write/read
+     //
+
+     virtual void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const = 0;
+     virtual void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) = 0;
+ };
+
+ //
+ // llama_kv_cache_guard
+ //
+
+ struct llama_kv_cache_guard {
+     llama_kv_cache_guard(llama_kv_cache * kv) : kv(kv) {}
+
+     ~llama_kv_cache_guard() {
+         kv->restore();
+     }
+
+     void commit() {
+         kv->commit();
+     }
+
+ private:
+     llama_kv_cache * kv;
+ };
+
+ //
+ // llama_kv_cache_unified
+ //
+
+ // TODO: add notion of max sequences
+ class llama_kv_cache_unified : public llama_kv_cache {
+ public:
+     struct kv_cell {
+         llama_pos pos = -1;
+         llama_pos delta = 0;
+
+         std::set<llama_seq_id> seq_id;
+
+         bool has_seq_id(const llama_seq_id & id) const {
+             return seq_id.find(id) != seq_id.end();
+         }
+
+         bool is_empty() const {
+             return seq_id.empty();
+         }
+
+         bool is_same_seq(const kv_cell & other) const {
+             return seq_id == other.seq_id;
+         }
+     };
+
+     static uint32_t get_padding(const llama_cparams & cparams);
+
+     llama_kv_cache_unified(
+             const llama_model & model,
+             lm_ggml_type type_k,
+             lm_ggml_type type_v,
+             bool v_trans,
+             bool offload,
+             uint32_t kv_size,
+             uint32_t padding);
+
+     ~llama_kv_cache_unified() = default;
+
+     //
+     // llama_memory_i
+     //
+
+     void clear() override;
+
+     bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+     void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+     void seq_keep(llama_seq_id seq_id) override;
+     void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+     void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+     llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+     //
+     // llama_kv_cache
+     //
+
+     void restore() override;
+     void commit() override;
+
+     bool update(llama_context & ctx) override;
+
+     void defrag_sched(float thold) override;
+
+     void set_full() override;
+
+     llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+
+     llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
+
+     // updates the cache head
+     // Note: On success, it's important that cache.head points
+     // to the first cell of the slot.
+     bool find_slot(const llama_ubatch & batch) override;
+
+     int32_t get_n_tokens() const override;
+     int32_t get_used_cells() const override;
+
+     // TODO: better data structures to reduce the cost of this operation
+     llama_pos get_pos_max() const override;
+
+     bool get_can_shift() const override;
+
+     // state write/load
+
+     void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+     void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+
+     // Note: The value of head isn't only used to optimize searching
+     // for a free KV slot. llama_decode_impl also uses it, so it
+     // cannot be freely changed after a slot has been allocated.
+     uint32_t head = 0;
+     uint32_t size = 0;
+     uint32_t used = 0; // used cells (i.e. at least one seq_id)
+
+     // computed before each graph build
+     uint32_t n = 0;
+
+     std::vector<kv_cell> cells;
+
+     std::vector<lm_ggml_tensor *> k_l; // per layer
+     std::vector<lm_ggml_tensor *> v_l;
+
+ private:
+     const llama_model & model;
+     const llama_hparams & hparams;
+
+     bool has_shift = false;
+     bool do_defrag = false;
+
+     bool v_trans = true; // the value tensor is transposed
+     bool can_shift = false;
+
+     // required padding
+     uint32_t padding = 1;
+
+     lm_ggml_type type_k = LM_GGML_TYPE_F16;
+     lm_ggml_type type_v = LM_GGML_TYPE_F16;
+
+     std::vector<lm_ggml_context_ptr> ctxs;
+     std::vector<lm_ggml_backend_buffer_ptr> bufs;
+
+     // defrag
+     struct {
+         std::vector<uint32_t> ids;
+     } defrag_info;
+
+     // return true if cells have been moved
+     bool defrag_prepare(int32_t n_max_nodes);
+
+     // commit/restore cache
+     struct slot_range {
+         uint32_t c0 = 0; // note: these are cell indices, not sequence positions
+         uint32_t c1 = 0;
+     };
+
+     // pending cell updates that are not yet committed
+     struct {
+         std::vector<slot_range> ranges;
+     } pending;
+
+     // find how many cells are currently in use
+     uint32_t cell_max() const;
+
+     size_t total_size() const;
+
+     size_t size_k_bytes() const;
+     size_t size_v_bytes() const;
+
+     lm_ggml_tensor * build_rope_shift(
+             const llama_cparams & cparams,
+             lm_ggml_context * ctx,
+             lm_ggml_tensor * cur,
+             lm_ggml_tensor * shift,
+             lm_ggml_tensor * factors,
+             float freq_base,
+             float freq_scale) const;
+
+     llm_graph_result_ptr build_graph_shift(
+             const llama_cparams & cparams,
+             lm_ggml_context * ctx,
+             lm_ggml_cgraph * gf) const;
+
+     llm_graph_result_ptr build_graph_defrag(
+             const llama_cparams & cparams,
+             lm_ggml_context * ctx,
+             lm_ggml_cgraph * gf) const;
+
+     void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
+     void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
+
+     bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
+     bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
+ };
+
+ //
+ // llama_kv_cache_recurrent
+ //
+
+ class llama_kv_cache_recurrent : public llama_kv_cache {
+ public:
+     struct kv_cell {
+         llama_pos pos = -1;
+         int32_t src = -1; // used to copy states
+         int32_t tail = -1;
+
+         std::set<llama_seq_id> seq_id;
+
+         bool has_seq_id(const llama_seq_id & id) const {
+             return seq_id.find(id) != seq_id.end();
+         }
+
+         bool is_empty() const {
+             return seq_id.empty();
+         }
+
+         bool is_same_seq(const kv_cell & other) const {
+             return seq_id == other.seq_id;
+         }
+     };
+
+     llama_kv_cache_recurrent(
+             const llama_model & model,
+             lm_ggml_type type_k,
+             lm_ggml_type type_v,
+             bool offload,
+             uint32_t kv_size);
+
+     ~llama_kv_cache_recurrent() = default;
+
+     //
+     // llama_memory_i
+     //
+
+     void clear() override;
+
+     bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+     void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+     void seq_keep(llama_seq_id seq_id) override;
+     void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+     void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+     llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+     //
+     // llama_kv_cache
+     //
+
+     void restore() override;
+     void commit() override;
+
+     bool update(llama_context & lctx) override;
+
+     void defrag_sched(float thold) override;
+
+     void set_full() override;
+
+     llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+
+     llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
+
+     bool find_slot(const llama_ubatch & batch) override;
+
+     int32_t get_n_tokens() const override;
+     int32_t get_used_cells() const override;
+
+     // TODO: better data structures to reduce the cost of this operation
+     llama_pos get_pos_max() const override;
+
+     bool get_can_shift() const override;
+
+     // TODO: temporary methods - they are not really const as they do const_cast<>, fix this
+     int32_t s_copy(int i) const;
+     float s_mask(int i) const;
+
+     // state write/load
+
+     void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+     void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+
+     // Note: The value of head isn't only used to optimize searching
+     // for a free KV slot. llama_decode_impl also uses it, so it
+     // cannot be freely changed after a slot has been allocated.
+     uint32_t head = 0;
+     uint32_t size = 0;
+     uint32_t used = 0; // used cells (i.e. at least one seq_id)
+
+     // computed before each graph build
+     uint32_t n = 0;
+
+     std::vector<kv_cell> cells;
+
+     std::vector<lm_ggml_tensor *> k_l; // per layer
+     std::vector<lm_ggml_tensor *> v_l;
+
+ private:
+     //const llama_model & model;
+     const llama_hparams & hparams;
+
+     // commit/restore cache
+     // TODO: rework for recurrent cache
+     struct slot_range {
+         uint32_t c0 = 0; // note: these are cell indices, not sequence positions
+         uint32_t c1 = 0;
+     };
+
+     // pending cell updates that are not yet committed
+     struct {
+         std::vector<slot_range> ranges;
+     } pending;
+
+     lm_ggml_type type_k = LM_GGML_TYPE_F16;
+     lm_ggml_type type_v = LM_GGML_TYPE_F16;
+
+     std::vector<lm_ggml_context_ptr> ctxs;
+     std::vector<lm_ggml_backend_buffer_ptr> bufs;
+
+     // find how many cells are currently in use
+     uint32_t cell_max() const;
+
+     size_t total_size() const;
+
+     size_t size_k_bytes() const;
+     size_t size_v_bytes() const;
+
+     void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
+     void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
+
+     bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
+     bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
+ };
+
+
+ //
+ // kv cache view
+ //
+
+ llama_kv_cache_view llama_kv_cache_view_init(const llama_kv_cache & kv, int32_t n_seq_max);
+
+ void llama_kv_cache_view_update(llama_kv_cache_view * view, const llama_kv_cache * kv);
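Note: llama_kv_cache_guard above encodes a transactional protocol. Per the header comments, restore() rolls back the cache after a failed batch and commit() clears the pending state, so the unconditional restore() in the guard's destructor then has nothing to undo. A standalone toy (stand-in types, not the real cache) showing the control flow:

#include <cstdio>
#include <vector>

// Toy cache: pending cell ids accumulate during batch processing and
// are either accepted by commit() or rolled back by restore().
struct kv_demo {
    std::vector<int> pending;           // stand-in for pending.ranges
    void commit()  { pending.clear(); } // accept the pending cells
    void restore() {                    // roll back anything still pending
        for (int c : pending) { std::printf("rolling back cell %d\n", c); }
        pending.clear();
    }
};

struct kv_guard_demo {                  // mirrors llama_kv_cache_guard
    kv_guard_demo(kv_demo * kv) : kv(kv) {}
    ~kv_guard_demo() { kv->restore(); } // always runs; no-op after commit
    void commit() { kv->commit(); }
private:
    kv_demo * kv;
};

int main() {
    kv_demo kv;
    {
        kv_guard_demo guard(&kv);
        kv.pending.push_back(42);       // simulate find_slot() allocating a cell
        bool ok = false;                // simulate a failed decode
        if (ok) guard.commit();
    }                                   // guard rolls back cell 42 here
    return 0;
}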
@@ -0,0 +1,31 @@
+ #pragma once
+
+ #include "llama.h"
+
+ struct llama_memory_params {
+     // kv cache
+     lm_ggml_type type_k;
+     lm_ggml_type type_v;
+
+     // parameters for other types of memory
+     // ...
+ };
+
+ // general concept of LLM memory
+ // the KV cache is a type of LLM memory, but there can be other types
+ class llama_memory_i {
+ public:
+     virtual ~llama_memory_i() = default;
+
+     virtual void clear() = 0;
+
+     virtual bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0;
+     virtual void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0;
+     virtual void seq_keep(llama_seq_id seq_id) = 0;
+     virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) = 0;
+     virtual void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) = 0;
+
+     virtual llama_pos seq_pos_max(llama_seq_id seq_id) const = 0;
+
+     virtual bool get_can_edit() const = 0;
+ };
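Note: llama_memory_i generalizes the sequence-editing surface that both cache classes above implement. A toy illustration of the position edits that seq_add/seq_div imply, assuming the half-open [p0, p1) position convention used elsewhere in llama.cpp (sentinel values like -1 and per-cell seq_id bookkeeping are omitted):

#include <cstdint>
#include <map>

// Toy model of per-cell positions; real caches also track seq_id sets.
struct seq_positions_demo {
    std::map<int, int32_t> pos; // cell id -> position within a sequence

    // shift positions in [p0, p1) by delta (e.g. context shifting)
    void seq_add(int32_t p0, int32_t p1, int32_t delta) {
        for (auto & [id, p] : pos) {
            (void) id;
            if (p >= p0 && p < p1) p += delta;
        }
    }

    // integer-divide positions in [p0, p1) (e.g. grouped self-extend)
    void seq_div(int32_t p0, int32_t p1, int d) {
        for (auto & [id, p] : pos) {
            (void) id;
            if (p >= p0 && p < p1) p /= d;
        }
    }
};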