cui-llama.rn 1.5.0 → 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (309)
  1. package/LICENSE +20 -20
  2. package/README.md +317 -319
  3. package/android/build.gradle +116 -116
  4. package/android/gradle.properties +5 -5
  5. package/android/src/main/AndroidManifest.xml +4 -4
  6. package/android/src/main/CMakeLists.txt +124 -124
  7. package/android/src/main/java/com/rnllama/LlamaContext.java +645 -645
  8. package/android/src/main/java/com/rnllama/RNLlama.java +695 -695
  9. package/android/src/main/java/com/rnllama/RNLlamaPackage.java +48 -48
  10. package/android/src/main/jni-utils.h +100 -100
  11. package/android/src/main/jni.cpp +1263 -1263
  12. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  13. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  14. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  15. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  16. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  17. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  18. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  19. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  20. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +135 -135
  21. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +136 -136
  22. package/cpp/README.md +4 -4
  23. package/cpp/ggml-llama-sim.metallib +0 -0
  24. package/cpp/ggml-llama.metallib +0 -0
  25. package/cpp/ggml-metal-impl.h +597 -597
  26. package/cpp/ggml-metal.m +4 -0
  27. package/cpp/ggml.h +1 -1
  28. package/cpp/rn-llama.cpp +873 -873
  29. package/cpp/rn-llama.h +138 -138
  30. package/cpp/sampling.h +107 -107
  31. package/cpp/unicode-data.cpp +7034 -7034
  32. package/cpp/unicode-data.h +20 -20
  33. package/cpp/unicode.cpp +849 -849
  34. package/cpp/unicode.h +66 -66
  35. package/ios/CMakeLists.txt +116 -108
  36. package/ios/RNLlama.h +7 -7
  37. package/ios/RNLlama.mm +418 -405
  38. package/ios/RNLlamaContext.h +57 -57
  39. package/ios/RNLlamaContext.mm +835 -835
  40. package/ios/rnllama.xcframework/Info.plist +74 -74
  41. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/binary-ops.h +16 -0
  42. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +143 -0
  43. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +677 -0
  44. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/cpu-common.h +72 -0
  45. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-alloc.h +76 -0
  46. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  47. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +354 -0
  48. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-common.h +1857 -0
  49. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpp.h +39 -0
  50. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
  51. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
  52. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
  53. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
  54. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu.h +138 -0
  55. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-impl.h +594 -0
  56. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  57. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal.h +66 -0
  58. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +216 -0
  59. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-quants.h +100 -0
  60. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-threading.h +14 -0
  61. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +2222 -0
  62. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/gguf.h +202 -0
  63. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  64. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json.hpp +24766 -0
  65. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-adapter.h +76 -0
  66. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-arch.h +428 -0
  67. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-batch.h +88 -0
  68. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +56 -0
  69. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +265 -0
  70. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +38 -0
  71. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cpp.h +30 -0
  72. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-grammar.h +173 -0
  73. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +592 -0
  74. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +156 -0
  75. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-impl.h +61 -0
  76. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-io.h +35 -0
  77. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +213 -0
  78. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +21 -0
  79. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-mmap.h +68 -0
  80. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-loader.h +169 -0
  81. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +409 -0
  82. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-sampling.h +32 -0
  83. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +125 -0
  84. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +1434 -0
  85. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/log.h +132 -0
  86. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  87. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  88. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ops.h +128 -0
  89. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +138 -0
  90. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sampling.h +107 -0
  91. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sgemm.h +14 -0
  92. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/simd-mappings.h +888 -0
  93. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/speculative.h +28 -0
  94. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unary-ops.h +28 -0
  95. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unicode-data.h +20 -0
  96. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unicode.h +66 -0
  97. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/vec.h +802 -0
  98. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
  99. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  100. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
  101. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/binary-ops.h +16 -0
  102. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +143 -0
  103. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +677 -0
  104. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/cpu-common.h +72 -0
  105. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-alloc.h +76 -0
  106. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  107. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +354 -0
  108. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +1857 -0
  109. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +39 -0
  110. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
  111. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
  112. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
  113. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
  114. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +138 -0
  115. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +594 -0
  116. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  117. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal.h +66 -0
  118. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +216 -0
  119. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-quants.h +100 -0
  120. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-threading.h +14 -0
  121. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +2222 -0
  122. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/gguf.h +202 -0
  123. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  124. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +24766 -0
  125. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-adapter.h +76 -0
  126. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +428 -0
  127. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +88 -0
  128. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +56 -0
  129. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +265 -0
  130. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +38 -0
  131. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cpp.h +30 -0
  132. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-grammar.h +173 -0
  133. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +592 -0
  134. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +156 -0
  135. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-impl.h +61 -0
  136. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-io.h +35 -0
  137. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +213 -0
  138. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +21 -0
  139. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-mmap.h +68 -0
  140. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-loader.h +169 -0
  141. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +409 -0
  142. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-sampling.h +32 -0
  143. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +125 -0
  144. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +1434 -0
  145. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/log.h +132 -0
  146. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  147. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  148. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ops.h +128 -0
  149. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +138 -0
  150. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sampling.h +107 -0
  151. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +14 -0
  152. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/simd-mappings.h +888 -0
  153. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/speculative.h +28 -0
  154. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +28 -0
  155. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unicode-data.h +20 -0
  156. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unicode.h +66 -0
  157. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +802 -0
  158. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  159. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +101 -0
  160. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  161. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  162. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/binary-ops.h +16 -0
  163. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +143 -0
  164. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +677 -0
  165. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/cpu-common.h +72 -0
  166. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-alloc.h +76 -0
  167. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  168. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +354 -0
  169. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-common.h +1857 -0
  170. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpp.h +39 -0
  171. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
  172. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
  173. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
  174. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
  175. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu.h +138 -0
  176. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-impl.h +594 -0
  177. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  178. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal.h +66 -0
  179. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +216 -0
  180. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-quants.h +100 -0
  181. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-threading.h +14 -0
  182. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +2222 -0
  183. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/gguf.h +202 -0
  184. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  185. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json.hpp +24766 -0
  186. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-adapter.h +76 -0
  187. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-arch.h +428 -0
  188. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-batch.h +88 -0
  189. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +56 -0
  190. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +265 -0
  191. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +38 -0
  192. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cpp.h +30 -0
  193. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-grammar.h +173 -0
  194. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +592 -0
  195. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +156 -0
  196. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-impl.h +61 -0
  197. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-io.h +35 -0
  198. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +213 -0
  199. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +21 -0
  200. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-mmap.h +68 -0
  201. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-loader.h +169 -0
  202. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +409 -0
  203. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-sampling.h +32 -0
  204. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +125 -0
  205. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +1434 -0
  206. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/log.h +132 -0
  207. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  208. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  209. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ops.h +128 -0
  210. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +138 -0
  211. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sampling.h +107 -0
  212. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sgemm.h +14 -0
  213. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/simd-mappings.h +888 -0
  214. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/speculative.h +28 -0
  215. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unary-ops.h +28 -0
  216. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unicode-data.h +20 -0
  217. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unicode.h +66 -0
  218. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/vec.h +802 -0
  219. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
  220. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  221. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
  222. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/binary-ops.h +16 -0
  223. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +143 -0
  224. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +677 -0
  225. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/cpu-common.h +72 -0
  226. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-alloc.h +76 -0
  227. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  228. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +354 -0
  229. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +1857 -0
  230. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +39 -0
  231. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
  232. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
  233. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
  234. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
  235. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +138 -0
  236. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +594 -0
  237. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  238. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal.h +66 -0
  239. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +216 -0
  240. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-quants.h +100 -0
  241. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-threading.h +14 -0
  242. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +2222 -0
  243. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/gguf.h +202 -0
  244. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  245. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +24766 -0
  246. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-adapter.h +76 -0
  247. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +428 -0
  248. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +88 -0
  249. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +56 -0
  250. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +265 -0
  251. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +38 -0
  252. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cpp.h +30 -0
  253. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-grammar.h +173 -0
  254. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +592 -0
  255. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +156 -0
  256. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-impl.h +61 -0
  257. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-io.h +35 -0
  258. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +213 -0
  259. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +21 -0
  260. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-mmap.h +68 -0
  261. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-loader.h +169 -0
  262. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +409 -0
  263. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-sampling.h +32 -0
  264. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +125 -0
  265. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +1434 -0
  266. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/log.h +132 -0
  267. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  268. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  269. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ops.h +128 -0
  270. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +138 -0
  271. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sampling.h +107 -0
  272. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +14 -0
  273. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/simd-mappings.h +888 -0
  274. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/speculative.h +28 -0
  275. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +28 -0
  276. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unicode-data.h +20 -0
  277. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unicode.h +66 -0
  278. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +802 -0
  279. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  280. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +101 -0
  281. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  282. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  283. package/jest/mock.js +203 -203
  284. package/lib/commonjs/NativeRNLlama.js +1 -2
  285. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  286. package/lib/commonjs/chat.js.map +1 -1
  287. package/lib/commonjs/grammar.js +12 -31
  288. package/lib/commonjs/grammar.js.map +1 -1
  289. package/lib/commonjs/index.js +47 -47
  290. package/lib/commonjs/index.js.map +1 -1
  291. package/lib/commonjs/package.json +1 -0
  292. package/lib/module/NativeRNLlama.js +2 -0
  293. package/lib/module/NativeRNLlama.js.map +1 -1
  294. package/lib/module/chat.js +2 -0
  295. package/lib/module/chat.js.map +1 -1
  296. package/lib/module/grammar.js +14 -31
  297. package/lib/module/grammar.js.map +1 -1
  298. package/lib/module/index.js +47 -45
  299. package/lib/module/index.js.map +1 -1
  300. package/lib/module/package.json +1 -0
  301. package/lib/typescript/NativeRNLlama.d.ts +6 -4
  302. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  303. package/lib/typescript/index.d.ts.map +1 -1
  304. package/llama-rn.podspec +48 -48
  305. package/package.json +233 -233
  306. package/src/NativeRNLlama.ts +426 -426
  307. package/src/chat.ts +44 -44
  308. package/src/grammar.ts +854 -854
  309. package/src/index.ts +495 -487
@@ -0,0 +1,592 @@
+ #pragma once
+
+ #include "llama-arch.h"
+ #include "llama-hparams.h"
+ #include "llama-adapter.h"
+
+ #include <cstdint>
+ #include <vector>
+ #include <memory>
+ #include <set>
+ #include <functional>
+
+ struct lm_ggml_cgraph;
+ struct lm_ggml_context;
+ struct lm_ggml_tensor;
+
+ struct llama_ubatch;
+ struct llama_cparams;
+
+ class llama_memory_i;
+ class llama_kv_cache_unified;
+
+ // certain models (typically multi-modal) can produce different types of graphs
+ enum llm_graph_type {
+     LLM_GRAPH_TYPE_DEFAULT,
+     LLM_GRAPH_TYPE_ENCODER,
+     LLM_GRAPH_TYPE_DECODER,
+ };
+
+ enum llm_ffn_op_type {
+     LLM_FFN_SILU,
+     LLM_FFN_GELU,
+     LLM_FFN_RELU,
+     LLM_FFN_RELU_SQR,
+     LLM_FFN_SWIGLU,
+ };
+
+ enum llm_ffn_gate_type {
+     LLM_FFN_SEQ,
+     LLM_FFN_PAR, // ffn_gate is parallel to ffn_up
+ };
+
+ enum llm_norm_type {
+     LLM_NORM,
+     LLM_NORM_RMS,
+     LLM_NORM_GROUP,
+ };
+
+ // TODO: tmp - need something better to pass the data from the encoder to the decoder
+ struct llama_cross {
+     // the output embeddings from the encoder as a ggml tensor
+     // TODO: this needs more work to be correct, for now copy the embeddings data to host memory
+     // ref: https://github.com/ggml-org/llama.cpp/pull/11213#discussion_r1969892524
+     //lm_ggml_tensor * t_embd = nullptr;
+
+     int64_t n_embd = 0;
+     int64_t n_enc = 0;
+
+     // embeddings data copied to host memory (tmp)
+     std::vector<float> v_embd;
+
+     // needed to construct the cross-attention mask in the decoder
+     std::vector<std::set<llama_seq_id>> seq_ids_enc;
+ };
+
+ //
+ // llm_graph_input
+ //
+
+ class llm_graph_input_i {
+ public:
+     virtual ~llm_graph_input_i() = default;
+
+     virtual void set_input(const llama_ubatch * ubatch) = 0;
+ };
+
+ using llm_graph_input_ptr = std::unique_ptr<llm_graph_input_i>;
+
+
+ class llm_graph_input_embd : public llm_graph_input_i {
+ public:
+     llm_graph_input_embd() = default;
+     virtual ~llm_graph_input_embd() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * tokens = nullptr; // I32 [n_batch]
+     lm_ggml_tensor * embd = nullptr; // F32 [n_embd, n_batch]
+ };
+
+ class llm_graph_input_pos : public llm_graph_input_i {
+ public:
+     llm_graph_input_pos(int64_t n_pos_per_token) : n_pos_per_token(n_pos_per_token) {}
+     virtual ~llm_graph_input_pos() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * pos = nullptr; // I32 [n_batch]
+
+     const int64_t n_pos_per_token = 1;
+ };
+
+ // temperature tuning, used by llama4
+ class llm_graph_input_attn_temp : public llm_graph_input_i {
+ public:
+     llm_graph_input_attn_temp(int64_t n_pos_per_token, uint32_t n_attn_temp_floor_scale, float f_attn_temp_scale)
+         : n_pos_per_token(n_pos_per_token), n_attn_temp_floor_scale(n_attn_temp_floor_scale), f_attn_temp_scale(f_attn_temp_scale) {}
+     virtual ~llm_graph_input_attn_temp() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * attn_scale = nullptr; // F32 [n_batch]
+
+     const int64_t n_pos_per_token = 1;
+
+     const uint32_t n_attn_temp_floor_scale;
+     const float f_attn_temp_scale;
+ };
+
+ class llm_graph_input_pos_bucket : public llm_graph_input_i {
+ public:
+     llm_graph_input_pos_bucket(const llama_hparams & hparams) : hparams(hparams) {}
+     virtual ~llm_graph_input_pos_bucket() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * pos_bucket = nullptr; // I32 [n_batch, n_batch]
+
+     const llama_hparams & hparams;
+ };
+
+ class llm_graph_input_pos_bucket_kv : public llm_graph_input_i {
+ public:
+     llm_graph_input_pos_bucket_kv(
+             const llama_hparams & hparams,
+             const llama_kv_cache_unified * kv_self) : hparams(hparams), kv_self(kv_self) {}
+     virtual ~llm_graph_input_pos_bucket_kv() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * pos_bucket = nullptr; // I32 [n_kv, n_batch]
+
+     const llama_hparams & hparams;
+     const llama_kv_cache_unified * kv_self;
+ };
+
+ class llm_graph_input_out_ids : public llm_graph_input_i {
+ public:
+     llm_graph_input_out_ids(
+             const llama_hparams & hparams,
+             const llama_cparams & cparams,
+             int32_t n_outputs) : hparams(hparams), cparams(cparams), n_outputs(n_outputs) {}
+     virtual ~llm_graph_input_out_ids() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * out_ids; // I32 [n_outputs]
+
+     const llama_hparams & hparams;
+     const llama_cparams & cparams;
+
+     const int32_t n_outputs;
+ };
+
+ class llm_graph_input_mean : public llm_graph_input_i {
+ public:
+     llm_graph_input_mean(const llama_cparams & cparams) : cparams(cparams) {}
+     virtual ~llm_graph_input_mean() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * mean; // F32 [n_batch, n_batch]
+
+     const llama_cparams & cparams;
+ };
+
+ class llm_graph_input_cls : public llm_graph_input_i {
+ public:
+     llm_graph_input_cls(const llama_cparams & cparams) : cparams(cparams) {}
+     virtual ~llm_graph_input_cls() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * cls; // I32 [n_batch]
+
+     const llama_cparams & cparams;
+ };
+
+ class llm_graph_input_s_copy : public llm_graph_input_i {
+ public:
+     llm_graph_input_s_copy(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {}
+     virtual ~llm_graph_input_s_copy() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * s_copy; // I32 [kv_size]
+
+     const llama_kv_cache_unified * kv_self;
+ };
+
+ class llm_graph_input_s_mask : public llm_graph_input_i {
+ public:
+     llm_graph_input_s_mask(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {}
+     virtual ~llm_graph_input_s_mask() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * s_mask; // F32 [1, n_kv]
+
+     const llama_kv_cache_unified * kv_self;
+ };
+
+ class llm_graph_input_cross_embd : public llm_graph_input_i {
+ public:
+     llm_graph_input_cross_embd(
+             const llama_cross * cross) : cross(cross) {}
+     virtual ~llm_graph_input_cross_embd() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * cross_embd; // F32 [n_embd, n_outputs_enc]
+
+     const llama_cross * cross;
+ };
+
+ class llm_graph_input_attn_no_cache : public llm_graph_input_i {
+ public:
+     llm_graph_input_attn_no_cache(const llama_hparams & hparams, const llama_cparams & cparams) :
+         hparams(hparams),
+         cparams(cparams) {
+     }
+     ~llm_graph_input_attn_no_cache() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * get_kq_mask() const { return kq_mask_cnv; }
+
+     lm_ggml_tensor * kq_mask = nullptr; // F32 [n_tokens, n_batch]
+     lm_ggml_tensor * kq_mask_cnv = nullptr; // [n_tokens, n_batch]
+
+     const llama_hparams & hparams;
+     const llama_cparams & cparams;
+ };
+
+ class llm_graph_input_attn_kv_unified : public llm_graph_input_i {
+ public:
+     llm_graph_input_attn_kv_unified(
+             const llama_hparams & hparams,
+             const llama_cparams & cparams,
+             const llama_kv_cache_unified * kv_self) :
+         hparams(hparams),
+         cparams(cparams),
+         kv_self(kv_self) {
+     }
+     ~llm_graph_input_attn_kv_unified() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * get_kq_mask() const { return self_kq_mask_cnv; }
+     lm_ggml_tensor * get_kq_mask_swa() const { return self_kq_mask_swa_cnv; }
+
+     lm_ggml_tensor * self_kq_mask = nullptr; // F32 [n_kv, n_batch]
+     lm_ggml_tensor * self_kq_mask_cnv = nullptr; // [n_kv, n_batch]
+     lm_ggml_tensor * self_kq_mask_swa = nullptr; // F32 [n_kv, n_batch]
+     lm_ggml_tensor * self_kq_mask_swa_cnv = nullptr; // [n_kv, n_batch]
+
+     const llama_hparams & hparams;
+     const llama_cparams & cparams;
+
+     const llama_kv_cache_unified * kv_self;
+ };
+
+ class llm_graph_input_attn_cross : public llm_graph_input_i {
+ public:
+     llm_graph_input_attn_cross(const llama_cross * cross) : cross(cross) {}
+     ~llm_graph_input_attn_cross() = default;
+
+     void set_input(const llama_ubatch * ubatch) override;
+
+     lm_ggml_tensor * get_kq_mask_cross() const { return cross_kq_mask_cnv; }
+
+     lm_ggml_tensor * cross_kq_mask = nullptr; // F32 [n_outputs_enc, n_batch]
+     lm_ggml_tensor * cross_kq_mask_cnv = nullptr; // F32 [n_outputs_enc, n_batch]
+
+     const llama_cross * cross = nullptr;
+ };
+
+ //
+ // llm_graph_result
+ //
+
+ // these objects deliver the result from the graph build process back to the llama_context
+ // note that the input tensors created for the graph are referenced here - the goal is to be able to populate their
+ // specific data, by calling the set_inputs() method
+ // along with the input tensors, the object also provides commonly used outputs tensors, such as logits, embeddings, etc.
+ // these are used by the llama_context to extact the relevant data, based on the compute parameters
+
+ class llm_graph_result_i {
+ public:
+     virtual ~llm_graph_result_i() = default;
+
+     virtual lm_ggml_tensor * get_logits() = 0;
+     virtual lm_ggml_tensor * get_embd() = 0;
+     virtual lm_ggml_tensor * get_embd_pooled() = 0;
+
+     virtual void set_inputs(const llama_ubatch * ubatch) = 0;
+ };
+
+ using llm_graph_result_ptr = std::unique_ptr<llm_graph_result_i>;
+
+
+ class llm_graph_result : public llm_graph_result_i {
+ public:
+     virtual ~llm_graph_result() = default;
+
+     lm_ggml_tensor * get_logits() override { return t_logits; }
+     lm_ggml_tensor * get_embd() override { return t_embd; }
+     lm_ggml_tensor * get_embd_pooled() override { return t_embd_pooled; }
+
+     void set_inputs(const llama_ubatch * ubatch) override {
+         for (auto & input : inputs) {
+             input->set_input(ubatch);
+         }
+     }
+
+     llm_graph_input_i * add_input(llm_graph_input_ptr input) {
+         inputs.emplace_back(std::move(input));
+         return inputs.back().get();
+     }
+
+     // important graph nodes
+     lm_ggml_tensor * t_logits = nullptr;
+     lm_ggml_tensor * t_embd = nullptr;
+     lm_ggml_tensor * t_embd_pooled = nullptr;
+
+     std::vector<llm_graph_input_ptr> inputs;
+ };
+
+ //
+ // llm_graph_context
+ //
+
+ // callback that allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.)
+ using llm_graph_cb = std::function<void(const llama_ubatch & ubatch, lm_ggml_tensor * cur, const char * name, int il)>;
+
+ struct llm_graph_params {
+     lm_ggml_context * ctx;
+
+     const llm_arch arch;
+
+     const llama_hparams & hparams;
+     const llama_cparams & cparams;
+     const llama_ubatch & ubatch;
+
+     lm_ggml_backend_sched * sched;
+     lm_ggml_backend * backend_cpu;
+
+     const llama_adapter_cvec * cvec;
+     const llama_adapter_loras * loras;
+     const llama_memory_i * memory;
+     const llama_cross * cross;
+
+     int32_t n_outputs;
+
+     const llm_graph_cb & cb;
+ };
+
+ struct llm_graph_context {
+     const llm_arch arch;
+
+     const llama_hparams & hparams;
+     const llama_cparams & cparams;
+     const llama_ubatch & ubatch;
+
+     const int64_t n_embd;
+     const int64_t n_layer;
+     const int64_t n_rot;
+     const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train)
+     const int64_t n_ctx_per_seq;
+     const int64_t n_head;
+     const int64_t n_head_kv;
+     const int64_t n_embd_head_k;
+     const int64_t n_embd_k_gqa;
+     const int64_t n_embd_head_v;
+     const int64_t n_embd_v_gqa;
+     const int64_t n_expert;
+     const int64_t n_expert_used;
+
+     const float freq_base;
+     const float freq_scale;
+     const float ext_factor;
+     const float attn_factor;
+     const float beta_fast;
+     const float beta_slow;
+     const float norm_eps;
+     const float norm_rms_eps;
+
+     const int32_t n_tokens;
+     const int32_t n_outputs;
+     const int32_t n_ctx_orig; // yarn
+
+     const enum llama_pooling_type pooling_type;
+     const enum llama_rope_type rope_type;
+
+     lm_ggml_context * ctx0 = nullptr;
+
+     lm_ggml_backend_sched * sched;
+
+     lm_ggml_backend * backend_cpu; // TODO: needed by build_attn_mha, figure out a way to remove?
+
+     const llama_adapter_cvec * cvec;
+     const llama_adapter_loras * loras;
+     const llama_memory_i * memory;
+     const llama_cross * cross;
+
+     const llm_graph_cb & cb_func;
+
+     std::unique_ptr<llm_graph_result> res;
+
+     llm_graph_context(const llm_graph_params & params);
+
+     int64_t n_pos_per_token() const;
+
+     void cb(lm_ggml_tensor * cur, const char * name, int il) const;
+
+     //
+     // common
+     //
+
+     lm_ggml_tensor * build_cvec(
+             lm_ggml_tensor * cur,
+             int il) const;
+
+     // do mat_mul, while optionally apply lora
+     lm_ggml_tensor * build_lora_mm(
+             lm_ggml_tensor * w,
+             lm_ggml_tensor * cur) const;
+
+     // do mat_mul_id, while optionally apply lora
+     lm_ggml_tensor * build_lora_mm_id(
+             lm_ggml_tensor * w, // lm_ggml_tensor * as
+             lm_ggml_tensor * cur, // lm_ggml_tensor * b
+             lm_ggml_tensor * ids) const;
+
+     lm_ggml_tensor * build_norm(
+             lm_ggml_tensor * cur,
+             lm_ggml_tensor * mw,
+             lm_ggml_tensor * mb,
+             llm_norm_type type,
+             int il) const;
+
+     lm_ggml_tensor * build_ffn(
+             lm_ggml_tensor * cur,
+             lm_ggml_tensor * up,
+             lm_ggml_tensor * up_b,
+             lm_ggml_tensor * up_s,
+             lm_ggml_tensor * gate,
+             lm_ggml_tensor * gate_b,
+             lm_ggml_tensor * gate_s,
+             lm_ggml_tensor * down,
+             lm_ggml_tensor * down_b,
+             lm_ggml_tensor * down_s,
+             lm_ggml_tensor * act_scales,
+             llm_ffn_op_type type_op,
+             llm_ffn_gate_type type_gate,
+             int il) const;
+
+     lm_ggml_tensor * build_moe_ffn(
+             lm_ggml_tensor * cur,
+             lm_ggml_tensor * gate_inp,
+             lm_ggml_tensor * up_exps,
+             lm_ggml_tensor * gate_exps,
+             lm_ggml_tensor * down_exps,
+             lm_ggml_tensor * exp_probs_b,
+             int64_t n_expert,
+             int64_t n_expert_used,
+             llm_ffn_op_type type_op,
+             bool norm_w,
+             bool scale_w,
+             float w_scale,
+             llama_expert_gating_func_type gating_op,
+             int il) const;
+
+     //
+     // inputs
+     //
+
+     lm_ggml_tensor * build_inp_embd(lm_ggml_tensor * tok_embd) const;
+     lm_ggml_tensor * build_inp_pos() const;
+     lm_ggml_tensor * build_inp_attn_scale() const;
+     lm_ggml_tensor * build_inp_out_ids() const;
+     lm_ggml_tensor * build_inp_mean() const;
+     lm_ggml_tensor * build_inp_cls() const;
+     lm_ggml_tensor * build_inp_s_copy() const;
+     lm_ggml_tensor * build_inp_s_mask() const;
+
+     lm_ggml_tensor * build_inp_cross_embd() const;
+     lm_ggml_tensor * build_inp_pos_bucket_enc() const;
+     lm_ggml_tensor * build_inp_pos_bucket_dec() const;
+     lm_ggml_tensor * build_pos_bias(lm_ggml_tensor * pos_bucket, lm_ggml_tensor * attn_rel_b) const;
+
+     //
+     // attention
+     //
+
+     lm_ggml_tensor * build_attn_mha(
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * q, // [n_embd_head_q, n_tokens, n_head_q]
+             lm_ggml_tensor * k, // [n_embd_head_k, n_tokens, n_head_k]
+             lm_ggml_tensor * v, // [n_embd_head_v, n_tokens, n_head_v] (v_trans == false)
+             lm_ggml_tensor * kq_b,
+             lm_ggml_tensor * kq_mask,
+             bool v_trans,
+             float kq_scale) const;
+
+     llm_graph_input_attn_no_cache * build_attn_inp_no_cache() const;
+
+     lm_ggml_tensor * build_attn(
+             llm_graph_input_attn_no_cache * inp,
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * wo,
+             lm_ggml_tensor * wo_b,
+             lm_ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
+             lm_ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
+             lm_ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
+             lm_ggml_tensor * kq_b,
+             float kq_scale,
+             int il) const;
+
+     llm_graph_input_attn_kv_unified * build_attn_inp_kv_unified() const;
+
+     lm_ggml_tensor * build_attn(
+             llm_graph_input_attn_kv_unified * inp,
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * wo,
+             lm_ggml_tensor * wo_b,
+             lm_ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
+             lm_ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
+             lm_ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
+             lm_ggml_tensor * kq_b,
+             float kq_scale,
+             int il) const;
+
+     llm_graph_input_attn_cross * build_attn_inp_cross() const;
+
+     lm_ggml_tensor * build_attn(
+             llm_graph_input_attn_cross * inp,
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * wo,
+             lm_ggml_tensor * wo_b,
+             lm_ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
+             lm_ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
+             lm_ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
+             lm_ggml_tensor * kq_b,
+             float kq_scale,
+             int il) const;
+
+     //
+     // recurrent
+     //
+
+     lm_ggml_tensor * build_copy_mask_state(
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * s,
+             lm_ggml_tensor * state_copy,
+             lm_ggml_tensor * state_mask,
+             int32_t n_state,
+             int32_t n_seqs) const;
+
+     lm_ggml_tensor * build_rwkv_token_shift_load(
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * state_copy,
+             lm_ggml_tensor * state_mask,
+             const llama_ubatch & ubatch,
+             int il) const;
+
+     lm_ggml_tensor * build_rwkv_token_shift_store(
+             lm_ggml_tensor * token_shift,
+             const llama_ubatch & ubatch,
+             int il) const;
+
+     //
+     // pooling
+     //
+
+     void build_pooling(
+             lm_ggml_cgraph * gf,
+             lm_ggml_tensor * cls,
+             lm_ggml_tensor * cls_b,
+             lm_ggml_tensor * cls_out,
+             lm_ggml_tensor * cls_out_b) const;
+ };
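
For orientation, here is a minimal usage sketch based only on the declarations in the hunk above: a custom llm_graph_input_i is registered with llm_graph_result so that set_inputs() dispatches to it, and an llm_graph_cb tensor callback is defined. This sketch is not part of the package diff; the class my_probe_input and the function sketch() are hypothetical, and it assumes the header shown above (and the headers it includes) are available on the include path as llama-graph.h.

// Illustrative sketch only - not shipped in cui-llama.rn.
#include "llama-graph.h"

#include <cstdio>
#include <memory>

// Hypothetical input: a real input would copy per-ubatch data into its ggml tensors.
class my_probe_input : public llm_graph_input_i {
public:
    void set_input(const llama_ubatch * ubatch) override {
        (void) ubatch; // only a forward declaration of llama_ubatch is needed here
        std::printf("my_probe_input::set_input called\n");
    }
};

static void sketch(const llama_ubatch * ubatch) {
    llm_graph_result res;

    // add_input() stores the input and returns a raw pointer that a graph
    // builder would keep in order to attach tensors to the compute graph.
    llm_graph_input_i * inp = res.add_input(std::make_unique<my_probe_input>());
    (void) inp;

    // llama_context later populates every registered input in one pass.
    res.set_inputs(ubatch);

    // llm_graph_cb: per-tensor hook invoked while the graph is being built
    // (used upstream for tensor naming, offloading, etc.).
    llm_graph_cb cb = [](const llama_ubatch & ub, lm_ggml_tensor * cur, const char * name, int il) {
        (void) ub;
        (void) cur;
        std::printf("graph tensor %s (layer %d)\n", name, il);
    };
    (void) cb;
}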