@novastera-oss/llamarn 0.2.1 → 0.2.2

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (266)
  1. package/README.md +80 -14
  2. package/RNLlamaCpp.podspec +10 -3
  3. package/android/CMakeLists.txt +8 -0
  4. package/android/src/main/cpp/include/llama.h +62 -125
  5. package/android/src/main/jniLibs/arm64-v8a/libggml-base.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/libggml-cpu.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/libggml.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/libllama.so +0 -0
  9. package/android/src/main/jniLibs/x86_64/libggml-base.so +0 -0
  10. package/android/src/main/jniLibs/x86_64/libggml-cpu.so +0 -0
  11. package/android/src/main/jniLibs/x86_64/libggml.so +0 -0
  12. package/android/src/main/jniLibs/x86_64/libllama.so +0 -0
  13. package/cpp/build-info.cpp +2 -2
  14. package/cpp/llama.cpp/README.md +11 -3
  15. package/cpp/llama.cpp/build-xcframework.sh +1 -0
  16. package/cpp/llama.cpp/common/CMakeLists.txt +8 -2
  17. package/cpp/llama.cpp/common/arg.cpp +153 -113
  18. package/cpp/llama.cpp/common/chat-parser.cpp +379 -0
  19. package/cpp/llama.cpp/common/chat-parser.h +117 -0
  20. package/cpp/llama.cpp/common/chat.cpp +847 -699
  21. package/cpp/llama.cpp/common/chat.h +73 -6
  22. package/cpp/llama.cpp/common/common.cpp +50 -82
  23. package/cpp/llama.cpp/common/common.h +21 -17
  24. package/cpp/llama.cpp/common/json-partial.cpp +255 -0
  25. package/cpp/llama.cpp/common/json-partial.h +37 -0
  26. package/cpp/llama.cpp/common/minja/chat-template.hpp +9 -5
  27. package/cpp/llama.cpp/common/minja/minja.hpp +69 -36
  28. package/cpp/llama.cpp/common/regex-partial.cpp +204 -0
  29. package/cpp/llama.cpp/common/regex-partial.h +56 -0
  30. package/cpp/llama.cpp/common/sampling.cpp +7 -8
  31. package/cpp/llama.cpp/convert_hf_to_gguf.py +453 -118
  32. package/cpp/llama.cpp/convert_hf_to_gguf_update.py +120 -68
  33. package/cpp/llama.cpp/ggml/CMakeLists.txt +2 -1
  34. package/cpp/llama.cpp/ggml/cmake/common.cmake +25 -0
  35. package/cpp/llama.cpp/ggml/include/ggml-opt.h +49 -28
  36. package/cpp/llama.cpp/ggml/include/ggml.h +26 -7
  37. package/cpp/llama.cpp/ggml/src/CMakeLists.txt +16 -10
  38. package/cpp/llama.cpp/ggml/src/ggml-backend.cpp +4 -1
  39. package/cpp/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +1 -0
  40. package/cpp/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +2 -0
  41. package/cpp/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +604 -0
  42. package/cpp/llama.cpp/ggml/src/ggml-cann/aclnn_ops.h +42 -0
  43. package/cpp/llama.cpp/ggml/src/ggml-cann/ggml-cann.cpp +54 -2
  44. package/cpp/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +50 -51
  45. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +2 -2
  46. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-impl.h +5 -9
  47. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +779 -19
  48. package/cpp/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +22 -0
  49. package/cpp/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.cpp +88 -5
  50. package/cpp/llama.cpp/ggml/src/ggml-cpu/kleidiai/kernels.h +47 -12
  51. package/cpp/llama.cpp/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +264 -69
  52. package/cpp/llama.cpp/ggml/src/ggml-cpu/ops.cpp +322 -100
  53. package/cpp/llama.cpp/ggml/src/ggml-cpu/simd-mappings.h +117 -1
  54. package/cpp/llama.cpp/ggml/src/ggml-cpu/vec.cpp +85 -16
  55. package/cpp/llama.cpp/ggml/src/ggml-cpu/vec.h +220 -49
  56. package/cpp/llama.cpp/ggml/src/ggml-cuda/acc.cu +40 -26
  57. package/cpp/llama.cpp/ggml/src/ggml-cuda/common.cuh +1 -1
  58. package/cpp/llama.cpp/ggml/src/ggml-cuda/cpy.cu +11 -1
  59. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-common.cuh +15 -7
  60. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-mma-f16.cuh +266 -64
  61. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-vec-f16.cuh +49 -4
  62. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn-vec-f32.cuh +48 -4
  63. package/cpp/llama.cpp/ggml/src/ggml-cuda/fattn.cu +2 -1
  64. package/cpp/llama.cpp/ggml/src/ggml-cuda/ggml-cuda.cu +5 -1
  65. package/cpp/llama.cpp/ggml/src/ggml-cuda/mmq.cu +2 -0
  66. package/cpp/llama.cpp/ggml/src/ggml-cuda/quantize.cu +7 -6
  67. package/cpp/llama.cpp/ggml/src/ggml-cuda/sum.cu +1 -1
  68. package/cpp/llama.cpp/ggml/src/ggml-cuda/unary.cu +10 -0
  69. package/cpp/llama.cpp/ggml/src/ggml-cuda/unary.cuh +2 -0
  70. package/cpp/llama.cpp/ggml/src/ggml-impl.h +1 -1
  71. package/cpp/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +4 -0
  72. package/cpp/llama.cpp/ggml/src/ggml-metal/ggml-metal.m +99 -17
  73. package/cpp/llama.cpp/ggml/src/ggml-metal/ggml-metal.metal +200 -2
  74. package/cpp/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +8 -2
  75. package/cpp/llama.cpp/ggml/src/ggml-musa/mudnn.cu +112 -0
  76. package/cpp/llama.cpp/ggml/src/ggml-musa/mudnn.cuh +12 -0
  77. package/cpp/llama.cpp/ggml/src/ggml-opencl/CMakeLists.txt +6 -0
  78. package/cpp/llama.cpp/ggml/src/ggml-opencl/ggml-opencl.cpp +972 -178
  79. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/argsort.cl +86 -0
  80. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/div.cl +72 -0
  81. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/group_norm.cl +72 -0
  82. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/sigmoid.cl +29 -0
  83. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/sub.cl +72 -0
  84. package/cpp/llama.cpp/ggml/src/ggml-opencl/kernels/sum_rows.cl +39 -0
  85. package/cpp/llama.cpp/ggml/src/ggml-opt.cpp +373 -190
  86. package/cpp/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +29 -23
  87. package/cpp/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp +5 -10
  88. package/cpp/llama.cpp/ggml/src/ggml-sycl/common.hpp +101 -5
  89. package/cpp/llama.cpp/ggml/src/ggml-sycl/concat.cpp +31 -33
  90. package/cpp/llama.cpp/ggml/src/ggml-sycl/conv.cpp +1 -0
  91. package/cpp/llama.cpp/ggml/src/ggml-sycl/convert.cpp +29 -2
  92. package/cpp/llama.cpp/ggml/src/ggml-sycl/cpy.cpp +4 -5
  93. package/cpp/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +59 -21
  94. package/cpp/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +9 -1
  95. package/cpp/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +84 -72
  96. package/cpp/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +2 -0
  97. package/cpp/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +37 -8
  98. package/cpp/llama.cpp/ggml/src/ggml-sycl/getrows.cpp +1 -3
  99. package/cpp/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp +324 -129
  100. package/cpp/llama.cpp/ggml/src/ggml-sycl/gla.cpp +1 -0
  101. package/cpp/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +31 -2
  102. package/cpp/llama.cpp/ggml/src/ggml-sycl/norm.cpp +95 -68
  103. package/cpp/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +1 -0
  104. package/cpp/llama.cpp/ggml/src/ggml-sycl/quants.hpp +22 -0
  105. package/cpp/llama.cpp/ggml/src/ggml-sycl/rope.cpp +1 -2
  106. package/cpp/llama.cpp/ggml/src/ggml-sycl/softmax.cpp +1 -4
  107. package/cpp/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +2 -3
  108. package/cpp/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +69 -43
  109. package/cpp/llama.cpp/ggml/src/ggml-sycl/wkv.cpp +2 -14
  110. package/cpp/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +81 -91
  111. package/cpp/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp +432 -181
  112. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +17 -0
  113. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_m.comp +1 -1
  114. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +6 -152
  115. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.comp +162 -0
  116. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +360 -0
  117. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +2 -118
  118. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +1 -1
  119. package/cpp/llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +12 -1
  120. package/cpp/llama.cpp/ggml/src/ggml.c +107 -36
  121. package/cpp/llama.cpp/ggml/src/gguf.cpp +33 -33
  122. package/cpp/llama.cpp/gguf-py/gguf/constants.py +100 -15
  123. package/cpp/llama.cpp/gguf-py/gguf/gguf_reader.py +1 -1
  124. package/cpp/llama.cpp/gguf-py/gguf/gguf_writer.py +44 -12
  125. package/cpp/llama.cpp/gguf-py/gguf/scripts/gguf_editor_gui.py +21 -10
  126. package/cpp/llama.cpp/gguf-py/gguf/scripts/gguf_new_metadata.py +5 -2
  127. package/cpp/llama.cpp/gguf-py/gguf/tensor_mapping.py +128 -31
  128. package/cpp/llama.cpp/gguf-py/gguf/utility.py +1 -1
  129. package/cpp/llama.cpp/gguf-py/pyproject.toml +1 -1
  130. package/cpp/llama.cpp/include/llama.h +62 -125
  131. package/cpp/llama.cpp/models/ggml-vocab-bert-bge.gguf.inp +1 -1
  132. package/cpp/llama.cpp/models/ggml-vocab-bert-bge.gguf.out +1 -1
  133. package/cpp/llama.cpp/models/ggml-vocab-command-r.gguf.inp +1 -1
  134. package/cpp/llama.cpp/models/ggml-vocab-command-r.gguf.out +1 -1
  135. package/cpp/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.inp +1 -1
  136. package/cpp/llama.cpp/models/ggml-vocab-deepseek-coder.gguf.out +1 -1
  137. package/cpp/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.inp +1 -1
  138. package/cpp/llama.cpp/models/ggml-vocab-deepseek-llm.gguf.out +1 -1
  139. package/cpp/llama.cpp/models/ggml-vocab-falcon.gguf.inp +1 -1
  140. package/cpp/llama.cpp/models/ggml-vocab-falcon.gguf.out +1 -1
  141. package/cpp/llama.cpp/models/ggml-vocab-gpt-2.gguf.inp +1 -1
  142. package/cpp/llama.cpp/models/ggml-vocab-gpt-2.gguf.out +1 -1
  143. package/cpp/llama.cpp/models/ggml-vocab-llama-bpe.gguf.inp +1 -1
  144. package/cpp/llama.cpp/models/ggml-vocab-llama-bpe.gguf.out +1 -1
  145. package/cpp/llama.cpp/models/ggml-vocab-llama-spm.gguf.inp +1 -1
  146. package/cpp/llama.cpp/models/ggml-vocab-llama-spm.gguf.out +1 -1
  147. package/cpp/llama.cpp/models/ggml-vocab-mpt.gguf.inp +1 -1
  148. package/cpp/llama.cpp/models/ggml-vocab-mpt.gguf.out +1 -1
  149. package/cpp/llama.cpp/models/ggml-vocab-nomic-bert-moe.gguf +0 -0
  150. package/cpp/llama.cpp/models/ggml-vocab-phi-3.gguf.inp +1 -1
  151. package/cpp/llama.cpp/models/ggml-vocab-phi-3.gguf.out +1 -1
  152. package/cpp/llama.cpp/models/ggml-vocab-qwen2.gguf.inp +1 -1
  153. package/cpp/llama.cpp/models/ggml-vocab-qwen2.gguf.out +1 -1
  154. package/cpp/llama.cpp/models/ggml-vocab-refact.gguf.inp +1 -1
  155. package/cpp/llama.cpp/models/ggml-vocab-refact.gguf.out +1 -1
  156. package/cpp/llama.cpp/models/ggml-vocab-starcoder.gguf.inp +1 -1
  157. package/cpp/llama.cpp/models/ggml-vocab-starcoder.gguf.out +1 -1
  158. package/cpp/llama.cpp/models/templates/Qwen-QwQ-32B.jinja +62 -0
  159. package/cpp/llama.cpp/models/templates/Qwen-Qwen3-0.6B.jinja +85 -0
  160. package/cpp/llama.cpp/models/templates/README.md +2 -0
  161. package/cpp/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt +5 -1
  162. package/cpp/llama.cpp/requirements/requirements-convert_hf_to_gguf_update.txt +5 -1
  163. package/cpp/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt +2 -0
  164. package/cpp/llama.cpp/requirements/requirements-gguf_editor_gui.txt +1 -1
  165. package/cpp/llama.cpp/src/CMakeLists.txt +2 -0
  166. package/cpp/llama.cpp/src/llama-arch.cpp +6 -0
  167. package/cpp/llama.cpp/src/llama-arch.h +2 -0
  168. package/cpp/llama.cpp/src/llama-batch.cpp +3 -1
  169. package/cpp/llama.cpp/src/llama-context.cpp +340 -123
  170. package/cpp/llama.cpp/src/llama-context.h +30 -0
  171. package/cpp/llama.cpp/src/llama-cparams.cpp +4 -0
  172. package/cpp/llama.cpp/src/llama-cparams.h +2 -0
  173. package/cpp/llama.cpp/src/llama-grammar.cpp +12 -2
  174. package/cpp/llama.cpp/src/llama-graph.cpp +157 -247
  175. package/cpp/llama.cpp/src/llama-graph.h +52 -7
  176. package/cpp/llama.cpp/src/llama-hparams.cpp +17 -1
  177. package/cpp/llama.cpp/src/llama-hparams.h +37 -5
  178. package/cpp/llama.cpp/src/llama-kv-cache.cpp +742 -481
  179. package/cpp/llama.cpp/src/llama-kv-cache.h +196 -99
  180. package/cpp/llama.cpp/src/llama-kv-cells.h +379 -0
  181. package/cpp/llama.cpp/src/llama-memory.h +4 -3
  182. package/cpp/llama.cpp/src/llama-model-loader.cpp +22 -17
  183. package/cpp/llama.cpp/src/llama-model-saver.cpp +281 -0
  184. package/cpp/llama.cpp/src/llama-model-saver.h +37 -0
  185. package/cpp/llama.cpp/src/llama-model.cpp +529 -172
  186. package/cpp/llama.cpp/src/llama-model.h +6 -1
  187. package/cpp/llama.cpp/src/llama-quant.cpp +15 -13
  188. package/cpp/llama.cpp/src/llama-sampling.cpp +2 -2
  189. package/cpp/llama.cpp/src/llama-vocab.cpp +35 -8
  190. package/cpp/llama.cpp/src/llama-vocab.h +6 -0
  191. package/cpp/llama.cpp/src/llama.cpp +14 -0
  192. package/cpp/rn-completion.cpp +4 -2
  193. package/ios/include/chat.h +73 -6
  194. package/ios/include/common/minja/chat-template.hpp +9 -5
  195. package/ios/include/common/minja/minja.hpp +69 -36
  196. package/ios/include/common.h +21 -17
  197. package/ios/include/llama.h +62 -125
  198. package/ios/libs/llama.xcframework/Info.plist +19 -19
  199. package/ios/libs/llama.xcframework/ios-arm64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  200. package/ios/libs/llama.xcframework/ios-arm64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4617 -4487
  201. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml-opt.h +237 -0
  202. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/ggml.h +26 -7
  203. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/Headers/llama.h +62 -125
  204. package/ios/libs/llama.xcframework/ios-arm64/llama.framework/llama +0 -0
  205. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  206. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4638 -4508
  207. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3557 -3435
  208. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml-opt.h +237 -0
  209. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/ggml.h +26 -7
  210. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/Headers/llama.h +62 -125
  211. package/ios/libs/llama.xcframework/ios-arm64_x86_64-simulator/llama.framework/llama +0 -0
  212. package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  213. package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4638 -4508
  214. package/ios/libs/llama.xcframework/macos-arm64_x86_64/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3559 -3437
  215. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml-opt.h +237 -0
  216. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/ggml.h +26 -7
  217. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Headers/llama.h +62 -125
  218. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml-opt.h +237 -0
  219. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/ggml.h +26 -7
  220. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/Headers/llama.h +62 -125
  221. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/A/llama +0 -0
  222. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml-opt.h +237 -0
  223. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/ggml.h +26 -7
  224. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/Headers/llama.h +62 -125
  225. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/Versions/Current/llama +0 -0
  226. package/ios/libs/llama.xcframework/macos-arm64_x86_64/llama.framework/llama +0 -0
  227. package/ios/libs/llama.xcframework/tvos-arm64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  228. package/ios/libs/llama.xcframework/tvos-arm64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4616 -4487
  229. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml-opt.h +237 -0
  230. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/ggml.h +26 -7
  231. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/Headers/llama.h +62 -125
  232. package/ios/libs/llama.xcframework/tvos-arm64/llama.framework/llama +0 -0
  233. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  234. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4637 -4508
  235. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3556 -3435
  236. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml-opt.h +237 -0
  237. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/ggml.h +26 -7
  238. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/Headers/llama.h +62 -125
  239. package/ios/libs/llama.xcframework/tvos-arm64_x86_64-simulator/llama.framework/llama +0 -0
  240. package/ios/libs/llama.xcframework/xros-arm64/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  241. package/ios/libs/llama.xcframework/xros-arm64/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4653 -4523
  242. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml-opt.h +237 -0
  243. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/ggml.h +26 -7
  244. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/Headers/llama.h +62 -125
  245. package/ios/libs/llama.xcframework/xros-arm64/llama.framework/llama +0 -0
  246. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/DWARF/llama +0 -0
  247. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/aarch64/llama.yml +4674 -4544
  248. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/dSYMs/llama.dSYM/Contents/Resources/Relocations/x86_64/llama.yml +3587 -3465
  249. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml-opt.h +237 -0
  250. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/ggml.h +26 -7
  251. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/Headers/llama.h +62 -125
  252. package/ios/libs/llama.xcframework/xros-arm64_x86_64-simulator/llama.framework/llama +0 -0
  253. package/package.json +1 -1
  254. package/cpp/llama.cpp/common/stb_image.h +0 -7988
  255. package/cpp/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +0 -112
  256. package/cpp/llama.cpp/models/ggml-vocab-chameleon.gguf.out +0 -46
  257. package/cpp/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +0 -112
  258. package/cpp/llama.cpp/models/ggml-vocab-deepseek-r1-qwen.gguf.out +0 -46
  259. package/cpp/llama.cpp/models/ggml-vocab-gpt-4o.gguf.inp +0 -112
  260. package/cpp/llama.cpp/models/ggml-vocab-gpt-4o.gguf.out +0 -46
  261. package/cpp/llama.cpp/models/ggml-vocab-llama4.gguf.inp +0 -112
  262. package/cpp/llama.cpp/models/ggml-vocab-llama4.gguf.out +0 -46
  263. package/cpp/llama.cpp/models/ggml-vocab-pixtral.gguf.inp +0 -112
  264. package/cpp/llama.cpp/models/ggml-vocab-pixtral.gguf.out +0 -46
  265. package/cpp/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.inp +0 -112
  266. package/cpp/llama.cpp/models/ggml-vocab-roberta-bpe.gguf.out +0 -46
package/cpp/llama.cpp/src/llama-kv-cache.h
@@ -4,10 +4,12 @@
 #include "llama-io.h"
 #include "llama-graph.h"
 #include "llama-memory.h"
+#include "llama-kv-cells.h"
 
 #include "ggml-cpp.h"
 
 #include <set>
+#include <unordered_map>
 #include <vector>
 
 struct llama_cparams;
@@ -34,12 +36,16 @@ struct llama_kv_cache : public llama_memory_i {
     virtual void defrag_sched(float thold) = 0;
 
     // simulate full cache, used for allocating worst-case compute buffers
+    // TODO: remove
     virtual void set_full() = 0;
 
     //
     // batch processing
     //
 
+    // =============================================================================================================
+    // TODO: refactor and simplify this [TAG: KV_API]
+
     virtual llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) = 0;
 
     // different KV caches require different batch splitting strategies
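Taken together, sbatch_init(), ubatch_next() and find_slot() (declared in the next hunk), plus commit()/restore(), form the decode loop that the [TAG: KV_API] comment wants to simplify. A minimal caller-side sketch of that flow, assuming a kv reference to some llama_kv_cache implementation and a prepared batch; graph building and the decode itself are elided:

    // Sketch only: driving the batch-processing interface for one decode call.
    llama_sbatch sbatch = kv.sbatch_init(batch, /*logits_all=*/false);

    bool ok = true;

    while (sbatch.n_tokens > 0) {
        llama_ubatch ubatch = kv.ubatch_next(sbatch, n_ubatch, /*embd_pooled=*/false);

        if (!kv.find_slot(ubatch)) { // reserve cells for this ubatch
            ok = false;
            break;
        }

        // ... build the graph and decode the ubatch ...
    }

    if (ok) {
        kv.commit();  // make the placed ubatches permanent
    } else {
        kv.restore(); // undo any ubatches already placed
    }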
@@ -48,11 +54,10 @@ struct llama_kv_cache : public llama_memory_i {
     // find an empty slot of size "n_tokens" in the cache
     virtual bool find_slot(const llama_ubatch & batch) = 0;
 
+    // =============================================================================================================
+
     // getters
-    virtual int32_t get_n_tokens() const = 0;
-    virtual int32_t get_used_cells() const = 0; // TODO: remove, this is too-specific to the unified cache
-    virtual llama_pos get_pos_max() const = 0;
-    virtual bool get_can_shift() const = 0;
+    virtual bool get_can_shift() const = 0;
 
     bool get_can_edit() const override { return get_can_shift(); }
 
@@ -87,38 +92,25 @@ private:
 // llama_kv_cache_unified
 //
 
-// TODO: add notion of max sequences
 class llama_kv_cache_unified : public llama_kv_cache {
 public:
-    struct kv_cell {
-        llama_pos pos = -1;
-        llama_pos delta = 0;
-
-        std::set<llama_seq_id> seq_id;
-
-        bool has_seq_id(const llama_seq_id & id) const {
-            return seq_id.find(id) != seq_id.end();
-        }
-
-        bool is_empty() const {
-            return seq_id.empty();
-        }
-
-        bool is_same_seq(const kv_cell & other) const {
-            return seq_id == other.seq_id;
-        }
-    };
-
     static uint32_t get_padding(const llama_cparams & cparams);
 
+    // this callback is used to filter out layers that should not be included in the cache
+    using layer_filter_cb = std::function<bool(int32_t il)>;
+
     llama_kv_cache_unified(
-            const llama_model & model,
-                    ggml_type   type_k,
-                    ggml_type   type_v,
-                         bool   v_trans,
-                         bool   offload,
-                     uint32_t   kv_size,
-                     uint32_t   padding);
+            const llama_model &  model,
+              layer_filter_cb && filter,
+                    ggml_type    type_k,
+                    ggml_type    type_v,
+                         bool    v_trans,
+                         bool    offload,
+                     uint32_t    kv_size,
+                     uint32_t    n_seq_max,
+                     uint32_t    n_pad,
+                     uint32_t    n_swa,
+               llama_swa_type    swa_type);
 
     ~llama_kv_cache_unified() = default;
 
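The new layer_filter_cb constructor argument is what lets a single unified cache hold KV tensors for only a subset of the model's layers; the llama_kv_cache_unified_iswa class further down relies on exactly this to split SWA and non-SWA layers across two instances. A hedged caller-side sketch follows; is_swa_layer() and the concrete argument values are illustrative assumptions, not the library's actual call site:

    // Sketch only: a unified cache restricted to the model's non-SWA layers.
    // is_swa_layer() is a hypothetical per-layer predicate.
    llama_kv_cache_unified::layer_filter_cb filter = [&](int32_t il) {
        return !is_swa_layer(il); // include layer il only if it is not SWA
    };

    llama_kv_cache_unified kv(
            model,               // const llama_model &
            std::move(filter),   // which layers this cache covers
            GGML_TYPE_F16,       // type_k
            GGML_TYPE_F16,       // type_v
            /*v_trans  =*/ true,
            /*offload  =*/ true,
            /*kv_size  =*/ 4096,
            /*n_seq_max=*/ 1,
            /*n_pad    =*/ 32,
            /*n_swa    =*/ 0,    // this instance applies no window
            LLAMA_SWA_TYPE_NONE);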
@@ -130,10 +122,11 @@ public:
 
     bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
     void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
-    void seq_keep(llama_seq_id seq_id) override;
-    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
     void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
 
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
     llama_pos seq_pos_max(llama_seq_id seq_id) const override;
 
     //
@@ -150,7 +143,6 @@ public:
     void set_full() override;
 
     llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
-
     llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
 
     // updates the cache head
@@ -158,53 +150,94 @@ public:
     // to the first cell of the slot.
     bool find_slot(const llama_ubatch & batch) override;
 
-    int32_t get_n_tokens() const override;
-    int32_t get_used_cells() const override;
-
-    // TODO: better data structures to reduce the cost of this operation
-    llama_pos get_pos_max() const override;
-
     bool get_can_shift() const override;
 
     // state write/load
 
     void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
-    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
 
-    // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_impl also uses it, so it
-    // cannot be freely changed after a slot has been allocated.
-    uint32_t head = 0;
-    uint32_t size = 0;
-    uint32_t used = 0; // used cells (i.e. at least one seq_id)
+    //
+    // llama_kv_cache_unified specific API
+    //
 
-    // computed before each graph build
-    uint32_t n = 0;
+    uint32_t get_n() const;
+    uint32_t get_size() const;
 
-    std::vector<kv_cell> cells;
+    // get views of the current state of the cache
+    ggml_tensor * get_k(ggml_context * ctx, int32_t il) const;
+    ggml_tensor * get_v(ggml_context * ctx, int32_t il) const;
 
-    std::vector<ggml_tensor *> k_l; // per layer
-    std::vector<ggml_tensor *> v_l;
+    // store k_cur and v_cur in the cache based on the current head location
+    ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, int32_t il) const;
+    ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, int32_t il) const;
+
+    void prune_swa(llama_seq_id seq_id, llama_pos pmin, llama_pos pmax);
+
+    void set_input_kq_mask   (ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
+    void set_input_k_shift   (ggml_tensor * dst) const;
+    void set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const;
 
 private:
     const llama_model & model;
     const llama_hparams & hparams;
 
-    bool has_shift = false;
-    bool do_defrag = false;
+    struct kv_layer {
+        // layer index in the model
+        // note: can be different from the layer index in the KV cache
+        uint32_t il;
+
+        ggml_tensor * k;
+        ggml_tensor * v;
+    };
 
+    bool do_defrag = false;
     bool v_trans = true; // the value tensor is transposed
-    bool can_shift = false;
+
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+
+    // computed before each graph build
+    // TODO: cells should start to maintain this value dynamically based on the edits
+    uint32_t n = 0;
+
+    const uint32_t n_seq_max = 1;
 
     // required padding
-    uint32_t padding = 1;
+    const uint32_t n_pad = 1;
+
+    // SWA
+    const uint32_t n_swa = 0;
 
-    ggml_type type_k = GGML_TYPE_F16;
-    ggml_type type_v = GGML_TYPE_F16;
+    const llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;
 
     std::vector<ggml_context_ptr> ctxs;
     std::vector<ggml_backend_buffer_ptr> bufs;
 
+    llama_kv_cells_unified cells;
+
+    std::vector<kv_layer> layers;
+
+    // model layer id -> KV cache layer id
+    std::unordered_map<int32_t, int32_t> map_layer_ids;
+
+    // recovery information used to restore the KV cells to their original state in case of a failure
+    // TODO: do not store as a state in the llama_kv_cache object, instead return upon batch preparation
+    //       to achieve that, first need to refactor the llama_kv_cache interface [TAG: KV_API]
+    struct {
+        void clear() {
+            states.clear();
+        }
+
+        struct state {
+            uint32_t i;
+
+            llama_kv_cells_unified cells;
+        };
+
+        // stack with the partial states before each ubatch
+        std::vector<state> states;
+    } recovery;
+
     // defrag
     struct {
         std::vector<uint32_t> ids;
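Before each ubatch is placed, the recovery struct above pushes a snapshot of the cells about to be modified, so a batch that fails partway can be unwound. A self-contained sketch of that save/rollback pattern; the snapshot payload is a stand-in, since the real llama_kv_cells_unified lives in the new llama-kv-cells.h:

    #include <cstdint>
    #include <functional>
    #include <vector>

    // Stand-in for a snapshot of the cells a ubatch is about to overwrite.
    struct cells_snapshot {
        uint32_t i;               // index of the first cell the snapshot covers
        std::vector<int32_t> pos; // saved per-cell positions (stand-in payload)
    };

    struct recovery_stack {
        // partial states, one pushed before each ubatch, oldest first
        std::vector<cells_snapshot> states;

        void save(cells_snapshot s) { states.push_back(std::move(s)); }

        // on failure: replay in reverse order so overlapping edits unwind cleanly
        void rollback(const std::function<void(const cells_snapshot &)> & restore) {
            for (auto it = states.rbegin(); it != states.rend(); ++it) {
                restore(*it);
            }
            states.clear();
        }

        void clear() { states.clear(); } // on successful commit, drop the snapshots
    };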
@@ -213,25 +246,13 @@ private:
     // return true if cells have been moved
     bool defrag_prepare(int32_t n_max_nodes);
 
-    // commit/restore cache
-    struct slot_range {
-        uint32_t c0 = 0; // note: these are cell indices, not sequence positions
-        uint32_t c1 = 0;
-    };
-
-    // pending cell updates that are not yet committed
-    struct {
-        std::vector<slot_range> ranges;
-    } pending;
-
-    // find how many cells are currently in use
-    uint32_t cell_max() const;
-
     size_t total_size() const;
 
     size_t size_k_bytes() const;
     size_t size_v_bytes() const;
 
+    bool is_masked_swa(llama_pos p0, llama_pos p1) const;
+
     ggml_tensor * build_rope_shift(
             const llama_cparams & cparams,
             ggml_context * ctx,
@@ -258,6 +279,100 @@ private:
     bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
 };
 
+//
+// llama_kv_cache_unified_iswa
+//
+
+// utilizes two instances of llama_kv_cache_unified
+// the first instance is for the non-SWA layers of the model and the second instance is for the SWA layers
+// upon successful commit, the SWA cache removes old tokens outside the n_swa window
+
+class llama_kv_cache_unified_iswa : public llama_kv_cache {
+public:
+    llama_kv_cache_unified_iswa(
+            const llama_model & model,
+                    ggml_type   type_k,
+                    ggml_type   type_v,
+                         bool   v_trans,
+                         bool   offload,
+                         bool   swa_full,
+                     uint32_t   kv_size,
+                     uint32_t   n_seq_max,
+                     uint32_t   n_batch,
+                     uint32_t   n_pad);
+
+    ~llama_kv_cache_unified_iswa() = default;
+
+    //
+    // llama_memory_i
+    //
+
+    void clear() override;
+
+    bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    //
+    // llama_kv_cache
+    //
+
+    void restore() override;
+    void commit() override;
+
+    bool update(llama_context & ctx) override;
+
+    void defrag_sched(float thold) override;
+
+    void set_full() override;
+
+    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
+    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
+
+    bool find_slot(const llama_ubatch & batch) override;
+
+    bool get_can_shift() const override;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+
+    //
+    // llama_kv_cache_unified_iswa specific API
+    //
+
+    llama_kv_cache_unified * get_kv_base() const;
+    llama_kv_cache_unified * get_kv_swa () const;
+
+private:
+    const llama_hparams & hparams;
+
+    bool do_prune = true;
+
+    struct {
+        struct entry {
+            llama_pos pmin;
+            llama_pos pmax;
+        };
+
+        void clear() {
+            pos.clear();
+        }
+
+        // used to perform SWA pruning of old tokens
+        std::unordered_map<llama_seq_id, entry> pos;
+    } pending;
+
+    std::unique_ptr<llama_kv_cache_unified> kv_base;
+    std::unique_ptr<llama_kv_cache_unified> kv_swa;
+};
+
 //
 // llama_kv_cache_recurrent
 //
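The llama_kv_cache_unified_iswa class above is just two llama_kv_cache_unified instances behind one interface: every mutating call fans out to both children, and commit() uses the pending per-sequence [pmin, pmax] range to prune the SWA child. A rough sketch of that pattern; the member names follow the header, but the bodies are illustrative, not the actual implementation:

    // Sketch only: fan-out plus commit-time pruning for the iswa cache.
    void llama_kv_cache_unified_iswa::seq_add(
            llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
        kv_base->seq_add(seq_id, p0, p1, shift); // full-context cache
        kv_swa ->seq_add(seq_id, p0, p1, shift); // sliding-window cache
    }

    void llama_kv_cache_unified_iswa::commit() {
        kv_base->commit();
        kv_swa ->commit();

        if (do_prune) {
            for (const auto & [seq_id, e] : pending.pos) {
                // drop SWA-cache tokens that fell outside the n_swa window
                kv_swa->prune_swa(seq_id, e.pmin, e.pmax);
            }
        }

        pending.clear();
    }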
@@ -289,7 +404,8 @@ public:
             ggml_type type_k,
             ggml_type type_v,
             bool offload,
-            uint32_t kv_size);
+            uint32_t kv_size,
+            uint32_t n_seq_max);
 
     ~llama_kv_cache_recurrent() = default;
 
@@ -301,10 +417,11 @@ public:
 
     bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
     void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
-    void seq_keep(llama_seq_id seq_id) override;
-    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
     void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
 
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
     llama_pos seq_pos_max(llama_seq_id seq_id) const override;
 
     //
@@ -314,24 +431,17 @@ public:
     void restore() override;
     void commit() override;
 
-    bool update(llama_context & lctx) override;
+    bool update(llama_context & ctx) override;
 
     void defrag_sched(float thold) override;
 
     void set_full() override;
 
     llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
-
     llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
 
     bool find_slot(const llama_ubatch & batch) override;
 
-    int32_t get_n_tokens() const override;
-    int32_t get_used_cells() const override;
-
-    // TODO: better data structures to reduce the cost of this operation
-    llama_pos get_pos_max() const override;
-
     bool get_can_shift() const override;
 
     // TODO: temporary methods - they are not really const as they do const_cast<>, fix this
@@ -343,11 +453,8 @@ public:
     void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
     void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
 
-    // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_impl also uses it, so it
-    // cannot be freely changed after a slot has been allocated.
-    uint32_t head = 0;
-    uint32_t size = 0;
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+    uint32_t size = 0; // total number of cells, shared across all sequences
     uint32_t used = 0; // used cells (i.e. at least one seq_id)
 
     // computed before each graph build
@@ -374,8 +481,7 @@ private:
         std::vector<slot_range> ranges;
     } pending;
 
-    ggml_type type_k = GGML_TYPE_F16;
-    ggml_type type_v = GGML_TYPE_F16;
+    const uint32_t n_seq_max = 1;
 
     std::vector<ggml_context_ptr> ctxs;
    std::vector<ggml_backend_buffer_ptr> bufs;
@@ -394,12 +500,3 @@ private:
     bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
     bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
 };
-
-
-//
-// kv cache view
-//
-
-llama_kv_cache_view llama_kv_cache_view_init(const llama_kv_cache & kv, int32_t n_seq_max);
-
-void llama_kv_cache_view_update(llama_kv_cache_view * view, const llama_kv_cache * kv);