cui-llama.rn 1.5.0 → 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (309):
  1. package/LICENSE +20 -20
  2. package/README.md +317 -319
  3. package/android/build.gradle +116 -116
  4. package/android/gradle.properties +5 -5
  5. package/android/src/main/AndroidManifest.xml +4 -4
  6. package/android/src/main/CMakeLists.txt +124 -124
  7. package/android/src/main/java/com/rnllama/LlamaContext.java +645 -645
  8. package/android/src/main/java/com/rnllama/RNLlama.java +695 -695
  9. package/android/src/main/java/com/rnllama/RNLlamaPackage.java +48 -48
  10. package/android/src/main/jni-utils.h +100 -100
  11. package/android/src/main/jni.cpp +1263 -1263
  12. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  13. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  14. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  15. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  16. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  17. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  18. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  19. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  20. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +135 -135
  21. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +136 -136
  22. package/cpp/README.md +4 -4
  23. package/cpp/ggml-llama-sim.metallib +0 -0
  24. package/cpp/ggml-llama.metallib +0 -0
  25. package/cpp/ggml-metal-impl.h +597 -597
  26. package/cpp/ggml-metal.m +4 -0
  27. package/cpp/ggml.h +1 -1
  28. package/cpp/rn-llama.cpp +873 -873
  29. package/cpp/rn-llama.h +138 -138
  30. package/cpp/sampling.h +107 -107
  31. package/cpp/unicode-data.cpp +7034 -7034
  32. package/cpp/unicode-data.h +20 -20
  33. package/cpp/unicode.cpp +849 -849
  34. package/cpp/unicode.h +66 -66
  35. package/ios/CMakeLists.txt +116 -108
  36. package/ios/RNLlama.h +7 -7
  37. package/ios/RNLlama.mm +418 -405
  38. package/ios/RNLlamaContext.h +57 -57
  39. package/ios/RNLlamaContext.mm +835 -835
  40. package/ios/rnllama.xcframework/Info.plist +74 -74
  41. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/binary-ops.h +16 -0
  42. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +143 -0
  43. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +677 -0
  44. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/cpu-common.h +72 -0
  45. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-alloc.h +76 -0
  46. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  47. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +354 -0
  48. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-common.h +1857 -0
  49. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpp.h +39 -0
  50. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
  51. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
  52. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
  53. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
  54. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu.h +138 -0
  55. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-impl.h +594 -0
  56. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  57. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal.h +66 -0
  58. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +216 -0
  59. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-quants.h +100 -0
  60. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-threading.h +14 -0
  61. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +2222 -0
  62. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/gguf.h +202 -0
  63. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  64. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json.hpp +24766 -0
  65. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-adapter.h +76 -0
  66. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-arch.h +428 -0
  67. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-batch.h +88 -0
  68. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +56 -0
  69. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +265 -0
  70. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +38 -0
  71. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cpp.h +30 -0
  72. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-grammar.h +173 -0
  73. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +592 -0
  74. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +156 -0
  75. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-impl.h +61 -0
  76. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-io.h +35 -0
  77. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +213 -0
  78. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +21 -0
  79. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-mmap.h +68 -0
  80. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-loader.h +169 -0
  81. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +409 -0
  82. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-sampling.h +32 -0
  83. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +125 -0
  84. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +1434 -0
  85. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/log.h +132 -0
  86. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  87. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  88. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ops.h +128 -0
  89. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +138 -0
  90. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sampling.h +107 -0
  91. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sgemm.h +14 -0
  92. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/simd-mappings.h +888 -0
  93. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/speculative.h +28 -0
  94. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unary-ops.h +28 -0
  95. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unicode-data.h +20 -0
  96. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unicode.h +66 -0
  97. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/vec.h +802 -0
  98. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
  99. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  100. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
  101. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/binary-ops.h +16 -0
  102. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +143 -0
  103. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +677 -0
  104. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/cpu-common.h +72 -0
  105. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-alloc.h +76 -0
  106. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  107. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +354 -0
  108. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +1857 -0
  109. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +39 -0
  110. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
  111. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
  112. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
  113. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
  114. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +138 -0
  115. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +594 -0
  116. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  117. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal.h +66 -0
  118. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +216 -0
  119. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-quants.h +100 -0
  120. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-threading.h +14 -0
  121. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +2222 -0
  122. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/gguf.h +202 -0
  123. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  124. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +24766 -0
  125. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-adapter.h +76 -0
  126. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +428 -0
  127. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +88 -0
  128. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +56 -0
  129. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +265 -0
  130. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +38 -0
  131. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cpp.h +30 -0
  132. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-grammar.h +173 -0
  133. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +592 -0
  134. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +156 -0
  135. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-impl.h +61 -0
  136. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-io.h +35 -0
  137. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +213 -0
  138. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +21 -0
  139. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-mmap.h +68 -0
  140. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-loader.h +169 -0
  141. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +409 -0
  142. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-sampling.h +32 -0
  143. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +125 -0
  144. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +1434 -0
  145. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/log.h +132 -0
  146. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  147. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  148. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ops.h +128 -0
  149. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +138 -0
  150. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sampling.h +107 -0
  151. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +14 -0
  152. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/simd-mappings.h +888 -0
  153. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/speculative.h +28 -0
  154. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +28 -0
  155. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unicode-data.h +20 -0
  156. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unicode.h +66 -0
  157. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +802 -0
  158. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  159. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +101 -0
  160. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  161. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  162. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/binary-ops.h +16 -0
  163. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +143 -0
  164. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +677 -0
  165. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/cpu-common.h +72 -0
  166. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-alloc.h +76 -0
  167. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  168. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +354 -0
  169. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-common.h +1857 -0
  170. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpp.h +39 -0
  171. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
  172. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
  173. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
  174. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
  175. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu.h +138 -0
  176. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-impl.h +594 -0
  177. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  178. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal.h +66 -0
  179. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +216 -0
  180. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-quants.h +100 -0
  181. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-threading.h +14 -0
  182. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +2222 -0
  183. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/gguf.h +202 -0
  184. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  185. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json.hpp +24766 -0
  186. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-adapter.h +76 -0
  187. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-arch.h +428 -0
  188. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-batch.h +88 -0
  189. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +56 -0
  190. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +265 -0
  191. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +38 -0
  192. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cpp.h +30 -0
  193. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-grammar.h +173 -0
  194. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +592 -0
  195. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +156 -0
  196. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-impl.h +61 -0
  197. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-io.h +35 -0
  198. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +213 -0
  199. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +21 -0
  200. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-mmap.h +68 -0
  201. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-loader.h +169 -0
  202. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +409 -0
  203. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-sampling.h +32 -0
  204. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +125 -0
  205. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +1434 -0
  206. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/log.h +132 -0
  207. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  208. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  209. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ops.h +128 -0
  210. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +138 -0
  211. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sampling.h +107 -0
  212. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sgemm.h +14 -0
  213. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/simd-mappings.h +888 -0
  214. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/speculative.h +28 -0
  215. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unary-ops.h +28 -0
  216. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unicode-data.h +20 -0
  217. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unicode.h +66 -0
  218. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/vec.h +802 -0
  219. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
  220. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  221. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
  222. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/binary-ops.h +16 -0
  223. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +143 -0
  224. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +677 -0
  225. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/cpu-common.h +72 -0
  226. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-alloc.h +76 -0
  227. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  228. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +354 -0
  229. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +1857 -0
  230. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +39 -0
  231. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
  232. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
  233. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
  234. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
  235. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +138 -0
  236. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +594 -0
  237. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  238. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal.h +66 -0
  239. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +216 -0
  240. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-quants.h +100 -0
  241. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-threading.h +14 -0
  242. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +2222 -0
  243. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/gguf.h +202 -0
  244. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  245. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +24766 -0
  246. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-adapter.h +76 -0
  247. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +428 -0
  248. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +88 -0
  249. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +56 -0
  250. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +265 -0
  251. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +38 -0
  252. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cpp.h +30 -0
  253. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-grammar.h +173 -0
  254. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +592 -0
  255. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +156 -0
  256. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-impl.h +61 -0
  257. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-io.h +35 -0
  258. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +213 -0
  259. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +21 -0
  260. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-mmap.h +68 -0
  261. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-loader.h +169 -0
  262. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +409 -0
  263. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-sampling.h +32 -0
  264. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +125 -0
  265. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +1434 -0
  266. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/log.h +132 -0
  267. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  268. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  269. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ops.h +128 -0
  270. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +138 -0
  271. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sampling.h +107 -0
  272. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +14 -0
  273. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/simd-mappings.h +888 -0
  274. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/speculative.h +28 -0
  275. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +28 -0
  276. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unicode-data.h +20 -0
  277. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unicode.h +66 -0
  278. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +802 -0
  279. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  280. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +101 -0
  281. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  282. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  283. package/jest/mock.js +203 -203
  284. package/lib/commonjs/NativeRNLlama.js +1 -2
  285. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  286. package/lib/commonjs/chat.js.map +1 -1
  287. package/lib/commonjs/grammar.js +12 -31
  288. package/lib/commonjs/grammar.js.map +1 -1
  289. package/lib/commonjs/index.js +47 -47
  290. package/lib/commonjs/index.js.map +1 -1
  291. package/lib/commonjs/package.json +1 -0
  292. package/lib/module/NativeRNLlama.js +2 -0
  293. package/lib/module/NativeRNLlama.js.map +1 -1
  294. package/lib/module/chat.js +2 -0
  295. package/lib/module/chat.js.map +1 -1
  296. package/lib/module/grammar.js +14 -31
  297. package/lib/module/grammar.js.map +1 -1
  298. package/lib/module/index.js +47 -45
  299. package/lib/module/index.js.map +1 -1
  300. package/lib/module/package.json +1 -0
  301. package/lib/typescript/NativeRNLlama.d.ts +6 -4
  302. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  303. package/lib/typescript/index.d.ts.map +1 -1
  304. package/llama-rn.podspec +48 -48
  305. package/package.json +233 -233
  306. package/src/NativeRNLlama.ts +426 -426
  307. package/src/chat.ts +44 -44
  308. package/src/grammar.ts +854 -854
  309. package/src/index.ts +495 -487
@@ -0,0 +1,169 @@ — new file: llama-model-loader.h (169 lines)
1
+ #pragma once
2
+
3
+ #include "llama.h"
4
+
5
+ #include "llama-impl.h"
6
+ #include "llama-arch.h"
7
+ #include "llama-mmap.h"
8
+
9
+ #include "ggml-cpp.h"
10
+
11
+ #include <cstddef>
12
+ #include <map>
13
+ #include <stdexcept>
14
+ #include <unordered_map>
15
+
16
+ using llama_buf_map = std::unordered_map<uint32_t, lm_ggml_backend_buffer_t>;
17
+
18
+ enum llama_fver {
19
+ LM_GGUF_FILE_VERSION_V1 = 1,
20
+ LM_GGUF_FILE_VERSION_V2 = 2,
21
+ LM_GGUF_FILE_VERSION_V3 = 3,
22
+ };
23
+
24
+ const char * llama_file_version_name(llama_fver version);
25
+
26
+ struct llama_model_loader {
27
+ // Holds information on a model weight
28
+ struct llama_tensor_weight {
29
+ uint16_t idx; // source file index
30
+ size_t offs; // tensor data offset in the original file
31
+
32
+ lm_ggml_tensor * tensor;
33
+
34
+ llama_tensor_weight(const llama_file * file, uint16_t idx, const struct lm_gguf_context * lm_gguf_ctx, lm_ggml_tensor * tensor) : idx(idx), tensor(tensor) {
35
+ const int tensor_idx = lm_gguf_find_tensor(lm_gguf_ctx, lm_ggml_get_name(tensor));
36
+ if (tensor_idx < 0) {
37
+ throw std::runtime_error(format("tensor '%s' not found in the model", lm_ggml_get_name(tensor)));
38
+ }
39
+
40
+ offs = lm_gguf_get_data_offset(lm_gguf_ctx) + lm_gguf_get_tensor_offset(lm_gguf_ctx, tensor_idx);
41
+ if (offs + lm_ggml_nbytes(tensor) < offs || offs + lm_ggml_nbytes(tensor) > file->size()) {
42
+ throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", lm_ggml_get_name(tensor)));
43
+ }
44
+ }
45
+ };
46
+
47
+ // custom comparator to sort weights more nicely by layer
48
+ struct weight_name_comparer {
49
+ bool operator()(const std::string & a, const std::string & b) const {
50
+ int a_layer = -1;
51
+ int b_layer = -1;
52
+ sscanf(a.c_str(), "blk.%d.", &a_layer);
53
+ sscanf(b.c_str(), "blk.%d.", &b_layer);
54
+ if (a_layer != b_layer) {
55
+ return a_layer < b_layer;
56
+ }
57
+ return a < b;
58
+ }
59
+ };
60
+
61
+ static const int TENSOR_NOT_REQUIRED = 1;
62
+ static const int TENSOR_DUPLICATED = 2;
63
+
64
+ int n_kv = 0;
65
+ int n_tensors = 0;
66
+ int n_created = 0;
67
+
68
+ uint64_t n_elements = 0;
69
+ size_t n_bytes = 0;
70
+
71
+ bool use_mmap = false;
72
+ bool check_tensors;
73
+
74
+ llama_files files;
75
+ llama_ftype ftype;
76
+ llama_fver fver;
77
+
78
+ llama_mmaps mappings;
79
+
80
+ std::map<std::string, llama_tensor_weight, weight_name_comparer> weights_map;
81
+ std::unordered_map<std::string, llama_model_kv_override> kv_overrides;
82
+ const llama_model_tensor_buft_override * tensor_buft_overrides;
83
+
84
+ lm_gguf_context_ptr meta;
85
+ std::vector<lm_ggml_context_ptr> contexts;
86
+
87
+ std::string arch_name;
88
+ LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
89
+
90
+ size_t size_done = 0;
91
+ size_t size_data = 0;
92
+ std::vector<std::pair<size_t, size_t>> mmaps_used;
93
+
94
+ llama_model_loader(
95
+ const std::string & fname,
96
+ std::vector<std::string> & splits, // optional, only need if the split does not follow naming scheme
97
+ bool use_mmap,
98
+ bool check_tensors,
99
+ const llama_model_kv_override * param_overrides_p,
100
+ const llama_model_tensor_buft_override * param_tensor_buft_overrides_p);
101
+
102
+ template<typename T>
103
+ typename std::enable_if<std::is_integral<T>::value, bool>::type
104
+ get_arr_n(const std::string & key, T & result, bool required = true);
105
+
106
+ template<typename T>
107
+ typename std::enable_if<std::is_integral<T>::value, bool>::type
108
+ get_arr_n(enum llm_kv kid, T & result, bool required = true);
109
+
110
+ template<typename T>
111
+ bool get_arr(const std::string & key, std::vector<T> & result, bool required = true);
112
+
113
+ template<typename T, size_t N_MAX>
114
+ bool get_arr(const std::string & key, std::array<T, N_MAX> & result, bool required = true);
115
+
116
+ template<typename T>
117
+ bool get_arr(enum llm_kv kid, T & result, bool required = true);
118
+
119
+ template<typename T>
120
+ bool get_key(const std::string & key, T & result, bool required = true);
121
+
122
+ template<typename T>
123
+ bool get_key(enum llm_kv kid, T & result, bool required = true);
124
+
125
+ template<typename T, size_t N_MAX>
126
+ bool get_key_or_arr(const std::string & key, std::array<T, N_MAX> & result, uint32_t n, bool required = true);
127
+
128
+ template<typename T>
129
+ bool get_key_or_arr(enum llm_kv kid, T & result, uint32_t n, bool required = true);
130
+
131
+ std::string get_arch_name() const;
132
+
133
+ enum llm_arch get_arch() const;
134
+
135
+ const llama_tensor_weight * get_weight(const char * name) const;
136
+
137
+ const llama_tensor_weight & require_weight(const char * name) const;
138
+
139
+ struct lm_ggml_tensor * get_tensor_meta(const char * name) const;
140
+
141
+ struct lm_ggml_tensor * require_tensor_meta(const std::string & name) const;
142
+
143
+ const struct lm_ggml_tensor * check_tensor_dims(const std::string & name, const std::vector<int64_t> & ne, bool required) const;
144
+
145
+ struct lm_ggml_tensor * create_tensor(struct lm_ggml_context * ctx, const std::string & name, const std::initializer_list<int64_t> & ne, int flags = 0);
146
+
147
+ struct lm_ggml_tensor * create_tensor_as_view(struct lm_ggml_context * ctx, struct lm_ggml_tensor * base, const std::string & name, const std::initializer_list<int64_t> & ne, size_t offset, bool required = true);
148
+
149
+ void done_getting_tensors() const;
150
+
151
+ void init_mappings(bool prefetch = true, llama_mlocks * mlock_mmaps = nullptr);
152
+
153
+ void get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, lm_ggml_context * ctx) const;
154
+
155
+ // for backwards compatibility, does not support ggml-backend
156
+ void load_data_for(struct lm_ggml_tensor * cur) const;
157
+
158
+ // Returns false if cancelled by progress_callback
159
+ bool load_all_data(
160
+ struct lm_ggml_context * ctx,
161
+ llama_buf_map & bufs,
162
+ llama_mlocks * lmlocks,
163
+ llama_progress_callback progress_callback,
164
+ void * progress_callback_user_data);
165
+
166
+ std::string ftype_name() const;
167
+
168
+ void print_info() const;
169
+ };
@@ -0,0 +1,409 @@
1
+ #pragma once
2
+
3
+ #include "llama.h"
4
+ #include "llama-arch.h"
5
+ #include "llama-graph.h"
6
+ #include "llama-hparams.h"
7
+ #include "llama-memory.h"
8
+ #include "llama-vocab.h"
9
+
10
+ #include <memory>
11
+ #include <string>
12
+ #include <unordered_map>
13
+ #include <vector>
14
+
15
+ struct llama_cparams;
16
+ struct llama_ubatch;
17
+ struct llama_model_loader;
18
+
19
+ // available models
20
// Known model size classes ("types"). The enumerator encodes the parameter
// count (e.g. LLM_TYPE_7B) or, for MoE models, the expert layout
// (e.g. LLM_TYPE_8x7B = 8 experts of 7B). Order is append-only: values are
// implicit (UNKNOWN == 0) and are compared/serialized by ordinal elsewhere.
enum llm_type {
    LLM_TYPE_UNKNOWN,
    LLM_TYPE_14M,
    LLM_TYPE_17M,
    LLM_TYPE_22M,
    LLM_TYPE_33M,
    LLM_TYPE_60M,
    LLM_TYPE_70M,
    LLM_TYPE_80M,
    LLM_TYPE_109M,
    LLM_TYPE_137M,
    LLM_TYPE_160M,
    LLM_TYPE_190M,
    LLM_TYPE_220M,
    LLM_TYPE_250M,
    LLM_TYPE_270M,
    LLM_TYPE_335M,
    LLM_TYPE_410M,
    LLM_TYPE_450M,
    LLM_TYPE_770M,
    LLM_TYPE_780M,
    LLM_TYPE_0_5B,
    LLM_TYPE_1B,
    LLM_TYPE_1_3B,
    LLM_TYPE_1_4B,
    LLM_TYPE_1_5B,
    LLM_TYPE_1_6B,
    LLM_TYPE_1_8B,
    LLM_TYPE_2B,
    LLM_TYPE_2_8B,
    LLM_TYPE_2_9B,
    LLM_TYPE_3B,
    LLM_TYPE_4B,
    LLM_TYPE_6B,
    LLM_TYPE_6_9B,
    LLM_TYPE_7B,
    LLM_TYPE_8B,
    LLM_TYPE_9B,
    LLM_TYPE_11B,
    LLM_TYPE_12B,
    LLM_TYPE_13B,
    LLM_TYPE_14B,
    LLM_TYPE_15B,
    LLM_TYPE_16B,
    LLM_TYPE_20B,
    LLM_TYPE_30B,
    LLM_TYPE_32B,
    LLM_TYPE_34B,
    LLM_TYPE_35B,
    LLM_TYPE_40B,
    LLM_TYPE_65B,
    LLM_TYPE_70B,
    LLM_TYPE_236B,
    LLM_TYPE_314B,
    LLM_TYPE_671B,
    LLM_TYPE_SMALL,
    LLM_TYPE_MEDIUM,
    LLM_TYPE_LARGE,
    LLM_TYPE_XL,
    LLM_TYPE_A1_7B,
    LLM_TYPE_A2_7B,
    LLM_TYPE_8x7B,
    LLM_TYPE_8x22B,
    LLM_TYPE_16x12B,
    LLM_TYPE_16x3_8B,
    LLM_TYPE_10B_128x3_66B,
    LLM_TYPE_57B_A14B,
    LLM_TYPE_27B,
    LLM_TYPE_290B,
    LLM_TYPE_17B_16E,  // llama4 Scout
    LLM_TYPE_17B_128E, // llama4 Maverick
};
92
+
93
// Per-layer weights for a "posnet" block: two resnet-style conv stages,
// an attention stage, and a trailing normalization. Each entry is a raw
// tensor handle defaulting to nullptr (tensors a given model lacks simply
// stay null). NOTE(review): pointers appear non-owning — presumably the
// model's ggml context owns the storage; confirm against the loader.
struct llama_layer_posnet {
    // resnet stage 1: norm + conv (with biases)
    struct lm_ggml_tensor * norm1   = nullptr;
    struct lm_ggml_tensor * norm1_b = nullptr;

    struct lm_ggml_tensor * conv1   = nullptr;
    struct lm_ggml_tensor * conv1_b = nullptr;

    // resnet stage 2: norm + conv (with biases)
    struct lm_ggml_tensor * norm2   = nullptr;
    struct lm_ggml_tensor * norm2_b = nullptr;

    struct lm_ggml_tensor * conv2   = nullptr;
    struct lm_ggml_tensor * conv2_b = nullptr;

    // attention: pre-norm, q/k/v projections, output projection (with biases)
    struct lm_ggml_tensor * attn_norm   = nullptr;
    struct lm_ggml_tensor * attn_norm_b = nullptr;

    struct lm_ggml_tensor * attn_q   = nullptr;
    struct lm_ggml_tensor * attn_q_b = nullptr;

    struct lm_ggml_tensor * attn_k   = nullptr;
    struct lm_ggml_tensor * attn_k_b = nullptr;

    struct lm_ggml_tensor * attn_v   = nullptr;
    struct lm_ggml_tensor * attn_v_b = nullptr;

    struct lm_ggml_tensor * attn_o   = nullptr;
    struct lm_ggml_tensor * attn_o_b = nullptr;

    // final normalization
    struct lm_ggml_tensor * norm   = nullptr;
    struct lm_ggml_tensor * norm_b = nullptr;
};
127
+
128
// Per-layer weights for a ConvNeXt-style block: depthwise conv (dw),
// normalization, two pointwise convs (pw1/pw2), and a learned scale (gamma).
// Raw tensor handles default to nullptr; absent tensors stay null.
struct llama_layer_convnext {
    struct lm_ggml_tensor * dw   = nullptr; // depthwise conv weight
    struct lm_ggml_tensor * dw_b = nullptr; // depthwise conv bias

    struct lm_ggml_tensor * norm   = nullptr;
    struct lm_ggml_tensor * norm_b = nullptr;

    struct lm_ggml_tensor * pw1   = nullptr; // pointwise conv 1
    struct lm_ggml_tensor * pw1_b = nullptr;

    struct lm_ggml_tensor * pw2   = nullptr; // pointwise conv 2
    struct lm_ggml_tensor * pw2_b = nullptr;

    struct lm_ggml_tensor * gamma = nullptr; // per-channel scale
};
143
+
144
+ struct llama_layer {
145
+ // normalization
146
+ struct lm_ggml_tensor * attn_norm = nullptr;
147
+ struct lm_ggml_tensor * attn_norm_b = nullptr;
148
+ struct lm_ggml_tensor * attn_norm_2 = nullptr;
149
+ struct lm_ggml_tensor * attn_norm_2_b = nullptr;
150
+ struct lm_ggml_tensor * attn_q_norm = nullptr;
151
+ struct lm_ggml_tensor * attn_q_norm_b = nullptr;
152
+ struct lm_ggml_tensor * attn_k_norm = nullptr;
153
+ struct lm_ggml_tensor * attn_k_norm_b = nullptr;
154
+ struct lm_ggml_tensor * attn_out_norm = nullptr;
155
+ struct lm_ggml_tensor * attn_out_norm_b = nullptr;
156
+ struct lm_ggml_tensor * attn_q_a_norm = nullptr;
157
+ struct lm_ggml_tensor * attn_kv_a_norm = nullptr;
158
+ struct lm_ggml_tensor * attn_sub_norm = nullptr;
159
+ struct lm_ggml_tensor * attn_post_norm = nullptr;
160
+ struct lm_ggml_tensor * ffn_sub_norm = nullptr;
161
+ struct lm_ggml_tensor * attn_norm_cross = nullptr;
162
+ struct lm_ggml_tensor * attn_norm_enc = nullptr;
163
+
164
+ // attention
165
+ struct lm_ggml_tensor * wq = nullptr;
166
+ struct lm_ggml_tensor * wk = nullptr;
167
+ struct lm_ggml_tensor * wv = nullptr;
168
+ struct lm_ggml_tensor * wo = nullptr;
169
+ struct lm_ggml_tensor * wqkv = nullptr;
170
+ struct lm_ggml_tensor * wq_a = nullptr;
171
+ struct lm_ggml_tensor * wq_b = nullptr;
172
+ struct lm_ggml_tensor * wkv_a_mqa = nullptr;
173
+ struct lm_ggml_tensor * wkv_b = nullptr;
174
+ struct lm_ggml_tensor * wq_cross = nullptr;
175
+ struct lm_ggml_tensor * wk_cross = nullptr;
176
+ struct lm_ggml_tensor * wv_cross = nullptr;
177
+ struct lm_ggml_tensor * wo_cross = nullptr;
178
+ struct lm_ggml_tensor * wq_enc = nullptr;
179
+ struct lm_ggml_tensor * wk_enc = nullptr;
180
+ struct lm_ggml_tensor * wv_enc = nullptr;
181
+ struct lm_ggml_tensor * wo_enc = nullptr;
182
+
183
+ // attention bias
184
+ struct lm_ggml_tensor * bq = nullptr;
185
+ struct lm_ggml_tensor * bk = nullptr;
186
+ struct lm_ggml_tensor * bv = nullptr;
187
+ struct lm_ggml_tensor * bo = nullptr;
188
+ struct lm_ggml_tensor * bqkv = nullptr;
189
+
190
+ // relative position bias
191
+ struct lm_ggml_tensor * attn_rel_b = nullptr;
192
+ struct lm_ggml_tensor * attn_rel_b_enc = nullptr;
193
+ struct lm_ggml_tensor * attn_rel_b_cross = nullptr;
194
+
195
+ // normalization
196
+ struct lm_ggml_tensor * ffn_norm = nullptr;
197
+ struct lm_ggml_tensor * ffn_norm_b = nullptr;
198
+ struct lm_ggml_tensor * ffn_post_norm = nullptr;
199
+ struct lm_ggml_tensor * layer_out_norm = nullptr;
200
+ struct lm_ggml_tensor * layer_out_norm_b = nullptr;
201
+ struct lm_ggml_tensor * ffn_norm_exps = nullptr;
202
+ struct lm_ggml_tensor * ffn_norm_enc = nullptr;
203
+
204
+ // ff
205
+ struct lm_ggml_tensor * ffn_gate = nullptr; // w1
206
+ struct lm_ggml_tensor * ffn_down = nullptr; // w2
207
+ struct lm_ggml_tensor * ffn_up = nullptr; // w3
208
+ struct lm_ggml_tensor * ffn_gate_enc = nullptr;
209
+ struct lm_ggml_tensor * ffn_down_enc = nullptr;
210
+ struct lm_ggml_tensor * ffn_up_enc = nullptr;
211
+
212
+ // ff MoE
213
+ struct lm_ggml_tensor * ffn_gate_inp = nullptr;
214
+ struct lm_ggml_tensor * ffn_gate_exps = nullptr;
215
+ struct lm_ggml_tensor * ffn_down_exps = nullptr;
216
+ struct lm_ggml_tensor * ffn_up_exps = nullptr;
217
+
218
+ // ff shared expert (shexp)
219
+ struct lm_ggml_tensor * ffn_gate_inp_shexp = nullptr;
220
+ struct lm_ggml_tensor * ffn_gate_shexp = nullptr;
221
+ struct lm_ggml_tensor * ffn_down_shexp = nullptr;
222
+ struct lm_ggml_tensor * ffn_up_shexp = nullptr;
223
+
224
+ // ff bias
225
+ struct lm_ggml_tensor * ffn_gate_b = nullptr;
226
+ struct lm_ggml_tensor * ffn_down_b = nullptr; // b2
227
+ struct lm_ggml_tensor * ffn_up_b = nullptr; // b3
228
+ struct lm_ggml_tensor * ffn_act = nullptr;
229
+ struct lm_ggml_tensor * ffn_exp_probs_b = nullptr;
230
+
231
+ // mamba proj
232
+ struct lm_ggml_tensor * ssm_in = nullptr;
233
+ struct lm_ggml_tensor * ssm_x = nullptr;
234
+ struct lm_ggml_tensor * ssm_dt = nullptr;
235
+ struct lm_ggml_tensor * ssm_out = nullptr;
236
+
237
+ // mamba
238
+ struct lm_ggml_tensor * ssm_conv1d = nullptr;
239
+ struct lm_ggml_tensor * ssm_a = nullptr;
240
+ struct lm_ggml_tensor * ssm_d = nullptr;
241
+
242
+ // mamba bias
243
+ struct lm_ggml_tensor * ssm_conv1d_b = nullptr;
244
+ struct lm_ggml_tensor * ssm_dt_b = nullptr;
245
+
246
+ // rwkv
247
+ struct lm_ggml_tensor * time_mix_w1 = nullptr;
248
+ struct lm_ggml_tensor * time_mix_w2 = nullptr;
249
+ struct lm_ggml_tensor * time_mix_lerp_x = nullptr;
250
+ struct lm_ggml_tensor * time_mix_lerp_w = nullptr;
251
+ struct lm_ggml_tensor * time_mix_lerp_k = nullptr;
252
+ struct lm_ggml_tensor * time_mix_lerp_v = nullptr;
253
+ struct lm_ggml_tensor * time_mix_lerp_r = nullptr;
254
+ struct lm_ggml_tensor * time_mix_lerp_g = nullptr;
255
+ struct lm_ggml_tensor * time_mix_lerp_fused = nullptr;
256
+
257
+ struct lm_ggml_tensor * time_mix_first = nullptr;
258
+ struct lm_ggml_tensor * time_mix_decay = nullptr;
259
+ struct lm_ggml_tensor * time_mix_decay_w1 = nullptr;
260
+ struct lm_ggml_tensor * time_mix_decay_w2 = nullptr;
261
+ struct lm_ggml_tensor * time_mix_key = nullptr;
262
+ struct lm_ggml_tensor * time_mix_key_b = nullptr;
263
+ struct lm_ggml_tensor * time_mix_value = nullptr;
264
+ struct lm_ggml_tensor * time_mix_value_b = nullptr;
265
+ struct lm_ggml_tensor * time_mix_receptance = nullptr;
266
+ struct lm_ggml_tensor * time_mix_receptance_b = nullptr;
267
+ struct lm_ggml_tensor * time_mix_gate = nullptr;
268
+
269
+ // rwkv7
270
+ struct lm_ggml_tensor * time_mix_w0 = nullptr;
271
+ struct lm_ggml_tensor * time_mix_a0 = nullptr;
272
+ struct lm_ggml_tensor * time_mix_a1 = nullptr;
273
+ struct lm_ggml_tensor * time_mix_a2 = nullptr;
274
+ struct lm_ggml_tensor * time_mix_v0 = nullptr;
275
+ struct lm_ggml_tensor * time_mix_v1 = nullptr;
276
+ struct lm_ggml_tensor * time_mix_v2 = nullptr;
277
+ struct lm_ggml_tensor * time_mix_g1 = nullptr;
278
+ struct lm_ggml_tensor * time_mix_g2 = nullptr;
279
+ struct lm_ggml_tensor * time_mix_k_k = nullptr;
280
+ struct lm_ggml_tensor * time_mix_k_a = nullptr;
281
+ struct lm_ggml_tensor * time_mix_r_k = nullptr;
282
+
283
+ struct lm_ggml_tensor * time_mix_ln = nullptr;
284
+ struct lm_ggml_tensor * time_mix_ln_b = nullptr;
285
+ struct lm_ggml_tensor * time_mix_output = nullptr;
286
+
287
+ struct lm_ggml_tensor * channel_mix_lerp_k = nullptr;
288
+ struct lm_ggml_tensor * channel_mix_lerp_r = nullptr;
289
+
290
+ struct lm_ggml_tensor * channel_mix_key = nullptr;
291
+ struct lm_ggml_tensor * channel_mix_receptance = nullptr;
292
+ struct lm_ggml_tensor * channel_mix_value = nullptr;
293
+
294
+ // long rope factors
295
+ struct lm_ggml_tensor * rope_long = nullptr;
296
+ struct lm_ggml_tensor * rope_short = nullptr;
297
+ struct lm_ggml_tensor * rope_freqs = nullptr;
298
+
299
+ // bitnet scale
300
+ struct lm_ggml_tensor * wq_scale = nullptr;
301
+ struct lm_ggml_tensor * wk_scale = nullptr;
302
+ struct lm_ggml_tensor * wv_scale = nullptr;
303
+ struct lm_ggml_tensor * wo_scale = nullptr;
304
+ struct lm_ggml_tensor * ffn_gate_scale = nullptr;
305
+ struct lm_ggml_tensor * ffn_up_scale = nullptr;
306
+ struct lm_ggml_tensor * ffn_down_scale = nullptr;
307
+
308
+ struct llama_layer_posnet posnet;
309
+
310
+ struct llama_layer_convnext convnext;
311
+ };
312
+
313
+ struct llama_model {
314
+ llm_type type = LLM_TYPE_UNKNOWN;
315
+ llm_arch arch = LLM_ARCH_UNKNOWN;
316
+
317
+ std::string name = "n/a";
318
+
319
+ llama_hparams hparams = {};
320
+ llama_vocab vocab;
321
+
322
+ struct lm_ggml_tensor * tok_embd = nullptr;
323
+ struct lm_ggml_tensor * type_embd = nullptr;
324
+ struct lm_ggml_tensor * pos_embd = nullptr;
325
+ struct lm_ggml_tensor * tok_norm = nullptr;
326
+ struct lm_ggml_tensor * tok_norm_b = nullptr;
327
+
328
+ struct lm_ggml_tensor * output_norm = nullptr;
329
+ struct lm_ggml_tensor * output_norm_b = nullptr;
330
+ struct lm_ggml_tensor * output = nullptr;
331
+ struct lm_ggml_tensor * output_b = nullptr;
332
+ struct lm_ggml_tensor * output_norm_enc = nullptr;
333
+
334
+ // classifier
335
+ struct lm_ggml_tensor * cls = nullptr;
336
+ struct lm_ggml_tensor * cls_b = nullptr;
337
+ struct lm_ggml_tensor * cls_out = nullptr;
338
+ struct lm_ggml_tensor * cls_out_b = nullptr;
339
+
340
+ struct lm_ggml_tensor * conv1d = nullptr;
341
+ struct lm_ggml_tensor * conv1d_b = nullptr;
342
+
343
+ std::vector<llama_layer> layers;
344
+
345
+ llama_model_params params;
346
+
347
+ // gguf metadata
348
+ std::unordered_map<std::string, std::string> lm_gguf_kv;
349
+
350
+ // list of devices used in this model
351
+ std::vector<lm_ggml_backend_dev_t> devices;
352
+
353
+ // for quantize-stats only
354
+ std::vector<std::pair<std::string, struct lm_ggml_tensor *>> tensors_by_name;
355
+
356
+ int64_t t_load_us = 0;
357
+ int64_t t_start_us = 0;
358
+
359
+ explicit llama_model(const struct llama_model_params & params);
360
+ ~llama_model();
361
+
362
+ void load_stats (llama_model_loader & ml);
363
+ void load_arch (llama_model_loader & ml);
364
+ void load_hparams(llama_model_loader & ml);
365
+ void load_vocab (llama_model_loader & ml);
366
+ bool load_tensors(llama_model_loader & ml); // returns false if cancelled by progress_callback
367
+
368
+ std::string arch_name() const;
369
+ std::string type_name() const;
370
+
371
+ std::string desc() const;
372
+
373
+ size_t size() const;
374
+ size_t n_tensors() const;
375
+ size_t n_devices() const;
376
+
377
+ // total number of parameters in the model
378
+ uint64_t n_elements() const;
379
+
380
+ void print_info() const;
381
+
382
+ lm_ggml_backend_dev_t dev_layer(int il) const;
383
+ lm_ggml_backend_dev_t dev_output() const;
384
+
385
+ lm_ggml_backend_buffer_type_t select_buft(int il) const;
386
+
387
+ bool has_tensor_overrides() const;
388
+
389
+ const struct lm_ggml_tensor * get_tensor(const char * name) const;
390
+
391
+ // TODO: move this to new llm_arch_model_i interface
392
+ llama_memory_i * create_memory() const; // TODO: params
393
+
394
+ // TODO: move this to new llm_arch_model_i interface
395
+ llm_graph_result_ptr build_graph(
396
+ const llm_graph_params & params,
397
+ lm_ggml_cgraph * gf,
398
+ llm_graph_type type) const;
399
+
400
+ private:
401
+ struct impl;
402
+ std::unique_ptr<impl> pimpl;
403
+ };
404
+
405
+ const char * llm_type_name(llm_type type);
406
+
407
+ // For internal test use
408
+ // TODO: remove
409
+ const std::vector<std::pair<std::string, lm_ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model);
@@ -0,0 +1,32 @@
1
+ #pragma once
2
+
3
+ // TODO: rename llama-sampling.h/.cpp to llama-sampler.h/.cpp ?
4
+
5
+ #include "llama.h"
6
+
7
+ #include <vector>
8
+
9
+ struct llama_vocab;
10
+ struct llama_grammar;
11
+
12
+ // sampler chain
13
+
14
+ struct llama_sampler_chain {
15
+ llama_sampler_chain_params params;
16
+
17
+ std::vector<struct llama_sampler *> samplers;
18
+
19
+ // timing
20
+
21
+ mutable int64_t t_sample_us;
22
+
23
+ mutable int32_t n_sample;
24
+ };
25
+
26
+ struct llama_sampler * llama_sampler_init_dry_testing(
27
+ int32_t context_size,
28
+ float dry_multiplier,
29
+ float dry_base,
30
+ int32_t dry_allowed_length,
31
+ int32_t dry_penalty_last_n,
32
+ const std::vector<std::vector<llama_token>>& seq_breakers);