cui-llama.rn 1.5.0 → 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (309) hide show
  1. package/LICENSE +20 -20
  2. package/README.md +317 -319
  3. package/android/build.gradle +116 -116
  4. package/android/gradle.properties +5 -5
  5. package/android/src/main/AndroidManifest.xml +4 -4
  6. package/android/src/main/CMakeLists.txt +124 -124
  7. package/android/src/main/java/com/rnllama/LlamaContext.java +645 -645
  8. package/android/src/main/java/com/rnllama/RNLlama.java +695 -695
  9. package/android/src/main/java/com/rnllama/RNLlamaPackage.java +48 -48
  10. package/android/src/main/jni-utils.h +100 -100
  11. package/android/src/main/jni.cpp +1263 -1263
  12. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  13. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  14. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  15. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  16. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  17. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  18. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  19. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  20. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +135 -135
  21. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +136 -136
  22. package/cpp/README.md +4 -4
  23. package/cpp/ggml-llama-sim.metallib +0 -0
  24. package/cpp/ggml-llama.metallib +0 -0
  25. package/cpp/ggml-metal-impl.h +597 -597
  26. package/cpp/ggml-metal.m +4 -0
  27. package/cpp/ggml.h +1 -1
  28. package/cpp/rn-llama.cpp +873 -873
  29. package/cpp/rn-llama.h +138 -138
  30. package/cpp/sampling.h +107 -107
  31. package/cpp/unicode-data.cpp +7034 -7034
  32. package/cpp/unicode-data.h +20 -20
  33. package/cpp/unicode.cpp +849 -849
  34. package/cpp/unicode.h +66 -66
  35. package/ios/CMakeLists.txt +116 -108
  36. package/ios/RNLlama.h +7 -7
  37. package/ios/RNLlama.mm +418 -405
  38. package/ios/RNLlamaContext.h +57 -57
  39. package/ios/RNLlamaContext.mm +835 -835
  40. package/ios/rnllama.xcframework/Info.plist +74 -74
  41. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/binary-ops.h +16 -0
  42. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +143 -0
  43. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +677 -0
  44. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/cpu-common.h +72 -0
  45. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-alloc.h +76 -0
  46. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  47. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +354 -0
  48. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-common.h +1857 -0
  49. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpp.h +39 -0
  50. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
  51. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
  52. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
  53. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
  54. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-cpu.h +138 -0
  55. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-impl.h +594 -0
  56. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  57. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal.h +66 -0
  58. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +216 -0
  59. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-quants.h +100 -0
  60. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-threading.h +14 -0
  61. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +2222 -0
  62. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/gguf.h +202 -0
  63. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  64. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/json.hpp +24766 -0
  65. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-adapter.h +76 -0
  66. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-arch.h +428 -0
  67. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-batch.h +88 -0
  68. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +56 -0
  69. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +265 -0
  70. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +38 -0
  71. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cpp.h +30 -0
  72. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-grammar.h +173 -0
  73. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +592 -0
  74. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +156 -0
  75. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-impl.h +61 -0
  76. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-io.h +35 -0
  77. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +213 -0
  78. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +21 -0
  79. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-mmap.h +68 -0
  80. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-loader.h +169 -0
  81. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +409 -0
  82. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-sampling.h +32 -0
  83. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +125 -0
  84. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +1434 -0
  85. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/log.h +132 -0
  86. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  87. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  88. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ops.h +128 -0
  89. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +138 -0
  90. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sampling.h +107 -0
  91. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/sgemm.h +14 -0
  92. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/simd-mappings.h +888 -0
  93. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/speculative.h +28 -0
  94. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unary-ops.h +28 -0
  95. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unicode-data.h +20 -0
  96. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/unicode.h +66 -0
  97. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/vec.h +802 -0
  98. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
  99. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  100. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
  101. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/binary-ops.h +16 -0
  102. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +143 -0
  103. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +677 -0
  104. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/cpu-common.h +72 -0
  105. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-alloc.h +76 -0
  106. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  107. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +354 -0
  108. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +1857 -0
  109. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +39 -0
  110. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
  111. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
  112. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
  113. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
  114. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +138 -0
  115. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +594 -0
  116. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  117. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal.h +66 -0
  118. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +216 -0
  119. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-quants.h +100 -0
  120. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-threading.h +14 -0
  121. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +2222 -0
  122. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/gguf.h +202 -0
  123. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  124. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +24766 -0
  125. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-adapter.h +76 -0
  126. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +428 -0
  127. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +88 -0
  128. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +56 -0
  129. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +265 -0
  130. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +38 -0
  131. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cpp.h +30 -0
  132. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-grammar.h +173 -0
  133. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +592 -0
  134. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +156 -0
  135. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-impl.h +61 -0
  136. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-io.h +35 -0
  137. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +213 -0
  138. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +21 -0
  139. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-mmap.h +68 -0
  140. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-loader.h +169 -0
  141. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +409 -0
  142. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-sampling.h +32 -0
  143. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +125 -0
  144. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +1434 -0
  145. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/log.h +132 -0
  146. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  147. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  148. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ops.h +128 -0
  149. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +138 -0
  150. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sampling.h +107 -0
  151. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +14 -0
  152. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/simd-mappings.h +888 -0
  153. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/speculative.h +28 -0
  154. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +28 -0
  155. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unicode-data.h +20 -0
  156. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/unicode.h +66 -0
  157. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +802 -0
  158. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  159. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +101 -0
  160. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  161. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  162. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/binary-ops.h +16 -0
  163. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +143 -0
  164. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +677 -0
  165. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/cpu-common.h +72 -0
  166. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-alloc.h +76 -0
  167. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  168. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +354 -0
  169. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-common.h +1857 -0
  170. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpp.h +39 -0
  171. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
  172. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
  173. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
  174. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
  175. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-cpu.h +138 -0
  176. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-impl.h +594 -0
  177. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  178. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal.h +66 -0
  179. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +216 -0
  180. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-quants.h +100 -0
  181. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-threading.h +14 -0
  182. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +2222 -0
  183. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/gguf.h +202 -0
  184. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  185. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/json.hpp +24766 -0
  186. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-adapter.h +76 -0
  187. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-arch.h +428 -0
  188. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-batch.h +88 -0
  189. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +56 -0
  190. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +265 -0
  191. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +38 -0
  192. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cpp.h +30 -0
  193. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-grammar.h +173 -0
  194. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +592 -0
  195. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +156 -0
  196. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-impl.h +61 -0
  197. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-io.h +35 -0
  198. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +213 -0
  199. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +21 -0
  200. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-mmap.h +68 -0
  201. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-loader.h +169 -0
  202. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +409 -0
  203. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-sampling.h +32 -0
  204. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +125 -0
  205. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +1434 -0
  206. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/log.h +132 -0
  207. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  208. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  209. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ops.h +128 -0
  210. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +138 -0
  211. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sampling.h +107 -0
  212. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/sgemm.h +14 -0
  213. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/simd-mappings.h +888 -0
  214. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/speculative.h +28 -0
  215. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unary-ops.h +28 -0
  216. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unicode-data.h +20 -0
  217. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/unicode.h +66 -0
  218. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/vec.h +802 -0
  219. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
  220. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  221. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
  222. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/binary-ops.h +16 -0
  223. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +143 -0
  224. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +677 -0
  225. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/cpu-common.h +72 -0
  226. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-alloc.h +76 -0
  227. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend-impl.h +255 -0
  228. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +354 -0
  229. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-common.h +1857 -0
  230. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpp.h +39 -0
  231. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-aarch64.h +8 -0
  232. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-impl.h +512 -0
  233. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-quants.h +63 -0
  234. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu-traits.h +38 -0
  235. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-cpu.h +138 -0
  236. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-impl.h +594 -0
  237. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +597 -0
  238. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal.h +66 -0
  239. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +216 -0
  240. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-quants.h +100 -0
  241. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-threading.h +14 -0
  242. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +2222 -0
  243. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/gguf.h +202 -0
  244. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json-schema-to-grammar.h +21 -0
  245. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/json.hpp +24766 -0
  246. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-adapter.h +76 -0
  247. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-arch.h +428 -0
  248. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-batch.h +88 -0
  249. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +56 -0
  250. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +265 -0
  251. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +38 -0
  252. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cpp.h +30 -0
  253. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-grammar.h +173 -0
  254. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +592 -0
  255. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +156 -0
  256. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-impl.h +61 -0
  257. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-io.h +35 -0
  258. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +213 -0
  259. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +21 -0
  260. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-mmap.h +68 -0
  261. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-loader.h +169 -0
  262. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +409 -0
  263. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-sampling.h +32 -0
  264. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +125 -0
  265. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +1434 -0
  266. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/log.h +132 -0
  267. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +537 -0
  268. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +2941 -0
  269. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ops.h +128 -0
  270. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +138 -0
  271. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sampling.h +107 -0
  272. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/sgemm.h +14 -0
  273. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/simd-mappings.h +888 -0
  274. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/speculative.h +28 -0
  275. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unary-ops.h +28 -0
  276. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unicode-data.h +20 -0
  277. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/unicode.h +66 -0
  278. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/vec.h +802 -0
  279. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  280. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +101 -0
  281. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  282. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  283. package/jest/mock.js +203 -203
  284. package/lib/commonjs/NativeRNLlama.js +1 -2
  285. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  286. package/lib/commonjs/chat.js.map +1 -1
  287. package/lib/commonjs/grammar.js +12 -31
  288. package/lib/commonjs/grammar.js.map +1 -1
  289. package/lib/commonjs/index.js +47 -47
  290. package/lib/commonjs/index.js.map +1 -1
  291. package/lib/commonjs/package.json +1 -0
  292. package/lib/module/NativeRNLlama.js +2 -0
  293. package/lib/module/NativeRNLlama.js.map +1 -1
  294. package/lib/module/chat.js +2 -0
  295. package/lib/module/chat.js.map +1 -1
  296. package/lib/module/grammar.js +14 -31
  297. package/lib/module/grammar.js.map +1 -1
  298. package/lib/module/index.js +47 -45
  299. package/lib/module/index.js.map +1 -1
  300. package/lib/module/package.json +1 -0
  301. package/lib/typescript/NativeRNLlama.d.ts +6 -4
  302. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  303. package/lib/typescript/index.d.ts.map +1 -1
  304. package/llama-rn.podspec +48 -48
  305. package/package.json +233 -233
  306. package/src/NativeRNLlama.ts +426 -426
  307. package/src/chat.ts +44 -44
  308. package/src/grammar.ts +854 -854
  309. package/src/index.ts +495 -487
@@ -0,0 +1,594 @@
1
+ #pragma once
2
+
3
+ // GGML internal header
4
+
5
+ #include "ggml.h"
6
+ #include "gguf.h"
7
+
8
+ #include <assert.h>
9
+ #include <math.h>
10
+ #include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
11
+ #include <stdbool.h>
12
+ #include <stdint.h>
13
+ #include <string.h>
14
+
15
+ #ifdef __ARM_FEATURE_SVE
16
+ #include <arm_sve.h>
17
+ #endif // __ARM_FEATURE_SVE
18
+
19
+ #if defined(__F16C__)
20
+ #include <immintrin.h>
21
+ #endif
22
+
23
+ #ifdef __cplusplus
24
+ extern "C" {
25
+ #endif
26
+
27
+ #ifndef MIN
28
+ # define MIN(a, b) ((a) < (b) ? (a) : (b))
29
+ #endif
30
+
31
+ #ifndef MAX
32
+ # define MAX(a, b) ((a) > (b) ? (a) : (b))
33
+ #endif
34
+
35
+ // required for mmap as gguf only guarantees 32-byte alignment
36
+ #define TENSOR_ALIGNMENT 32
37
+
38
+ // static_assert should be a #define, but if it's not,
39
+ // fall back to the _Static_assert C11 keyword.
40
+ // if C99 - static_assert is noop
41
+ // ref: https://stackoverflow.com/a/53923785/4039976
42
+ #ifndef __cplusplus
43
+ #ifndef static_assert
44
+ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
45
+ #define static_assert(cond, msg) _Static_assert(cond, msg)
46
+ #else
47
+ #define static_assert(cond, msg) struct global_scope_noop_trick
48
+ #endif
49
+ #endif
50
+ #endif
51
+
52
// round n up to the next multiple of 32 (result >= n; multiples of 32 map to themselves)
static inline int lm_ggml_up32(int n) {
    // -32 == ~31 in two's complement: add 31, then clear the low 5 bits
    return (n + 31) & -32;
}
55
+
56
+ //static inline int lm_ggml_up64(int n) {
57
+ // return (n + 63) & ~63;
58
+ //}
59
+
60
// round n up to the next multiple of m; m must be a power of 2
static inline int lm_ggml_up(int n, int m) {
    // a power of two shares no bits with (itself - 1)
    LM_GGML_ASSERT((m & (m - 1)) == 0);
    const int mask = m - 1;
    return (n + mask) & ~mask;
}
65
+
66
+ //
67
+ // logging
68
+ //
69
+
70
+ LM_GGML_ATTRIBUTE_FORMAT(2, 3)
71
+ LM_GGML_API void lm_ggml_log_internal (enum lm_ggml_log_level level, const char * format, ...);
72
+ LM_GGML_API void lm_ggml_log_callback_default(enum lm_ggml_log_level level, const char * text, void * user_data);
73
+
74
+ #define LM_GGML_LOG(...) lm_ggml_log_internal(LM_GGML_LOG_LEVEL_NONE , __VA_ARGS__)
75
+ #define LM_GGML_LOG_INFO(...) lm_ggml_log_internal(LM_GGML_LOG_LEVEL_INFO , __VA_ARGS__)
76
+ #define LM_GGML_LOG_WARN(...) lm_ggml_log_internal(LM_GGML_LOG_LEVEL_WARN , __VA_ARGS__)
77
+ #define LM_GGML_LOG_ERROR(...) lm_ggml_log_internal(LM_GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
78
+ #define LM_GGML_LOG_DEBUG(...) lm_ggml_log_internal(LM_GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
79
+ #define LM_GGML_LOG_CONT(...) lm_ggml_log_internal(LM_GGML_LOG_LEVEL_CONT , __VA_ARGS__)
80
+
81
+ #define LM_GGML_DEBUG 0
82
+
83
+ #if (LM_GGML_DEBUG >= 1)
84
+ #define LM_GGML_PRINT_DEBUG(...) LM_GGML_LOG_DEBUG(__VA_ARGS__)
85
+ #else
86
+ #define LM_GGML_PRINT_DEBUG(...)
87
+ #endif
88
+
89
+ #if (LM_GGML_DEBUG >= 5)
90
+ #define LM_GGML_PRINT_DEBUG_5(...) LM_GGML_LOG_DEBUG(__VA_ARGS__)
91
+ #else
92
+ #define LM_GGML_PRINT_DEBUG_5(...)
93
+ #endif
94
+
95
+ #if (LM_GGML_DEBUG >= 10)
96
+ #define LM_GGML_PRINT_DEBUG_10(...) LM_GGML_LOG_DEBUG(__VA_ARGS__)
97
+ #else
98
+ #define LM_GGML_PRINT_DEBUG_10(...)
99
+ #endif
100
+
101
+ // tensor params
102
+
103
+ static void lm_ggml_set_op_params(struct lm_ggml_tensor * tensor, const void * params, size_t params_size) {
104
+ LM_GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
105
+ assert(params_size <= LM_GGML_MAX_OP_PARAMS);
106
+ memcpy(tensor->op_params, params, params_size);
107
+ }
108
+
109
+ static int32_t lm_ggml_get_op_params_i32(const struct lm_ggml_tensor * tensor, uint32_t i) {
110
+ assert(i < LM_GGML_MAX_OP_PARAMS / sizeof(int32_t));
111
+ return ((const int32_t *)(tensor->op_params))[i];
112
+ }
113
+
114
+ static float lm_ggml_get_op_params_f32(const struct lm_ggml_tensor * tensor, uint32_t i) {
115
+ assert(i < LM_GGML_MAX_OP_PARAMS / sizeof(float));
116
+ return ((const float *)(tensor->op_params))[i];
117
+ }
118
+
119
+ static void lm_ggml_set_op_params_i32(struct lm_ggml_tensor * tensor, uint32_t i, int32_t value) {
120
+ assert(i < LM_GGML_MAX_OP_PARAMS / sizeof(int32_t));
121
+ ((int32_t *)(tensor->op_params))[i] = value;
122
+ }
123
+
124
+ static void lm_ggml_set_op_params_f32(struct lm_ggml_tensor * tensor, uint32_t i, float value) {
125
+ assert(i < LM_GGML_MAX_OP_PARAMS / sizeof(float));
126
+ ((float *)(tensor->op_params))[i] = value;
127
+ }
128
+
129
// parameter blocks for the custom map operators; stored in the tensor's
// op_params blob (see lm_ggml_set_op_params above)

struct lm_ggml_map_custom1_op_params {
    lm_ggml_custom1_op_t fun;      // user-supplied callback (1 input tensor)
    int n_tasks;                   // number of parallel tasks requested
    void * userdata;               // opaque pointer forwarded to the callback
};

struct lm_ggml_map_custom2_op_params {
    lm_ggml_custom2_op_t fun;      // user-supplied callback (2 input tensors)
    int n_tasks;                   // number of parallel tasks requested
    void * userdata;               // opaque pointer forwarded to the callback
};

struct lm_ggml_map_custom3_op_params {
    lm_ggml_custom3_op_t fun;      // user-supplied callback (3 input tensors)
    int n_tasks;                   // number of parallel tasks requested
    void * userdata;               // opaque pointer forwarded to the callback
};
146
+
147
+ // bitset
148
+
149
// bitset: a flat array of bits packed into 32-bit words

typedef uint32_t lm_ggml_bitset_t;

static_assert(sizeof(lm_ggml_bitset_t) == 4, "bitset_t constants must be updated");
#define BITSET_SHR 5 // log2(sizeof(lm_ggml_bitset_t)*8)
#define BITSET_MASK (sizeof(lm_ggml_bitset_t)*8 - 1)

// number of words required to store n bits
static size_t lm_ggml_bitset_size(size_t n) {
    return (n + BITSET_MASK) >> BITSET_SHR;
}

// test bit i
static inline bool lm_ggml_bitset_get(const lm_ggml_bitset_t * bitset, size_t i) {
    const size_t          word = i >> BITSET_SHR;
    const lm_ggml_bitset_t bit = 1u << (i & BITSET_MASK);
    return (bitset[word] & bit) != 0;
}

// set bit i
static inline void lm_ggml_bitset_set(lm_ggml_bitset_t * bitset, size_t i) {
    bitset[i >> BITSET_SHR] |= 1u << (i & BITSET_MASK);
}

// clear bit i
static inline void lm_ggml_bitset_clear(lm_ggml_bitset_t * bitset, size_t i) {
    bitset[i >> BITSET_SHR] &= ~(1u << (i & BITSET_MASK));
}
170
+
171
+ // hash set
172
+
173
+ #define LM_GGML_HASHSET_FULL ((size_t)-1)
174
+ #define LM_GGML_HASHSET_ALREADY_EXISTS ((size_t)-2)
175
+
176
+ struct lm_ggml_hash_set {
177
+ size_t size;
178
+ lm_ggml_bitset_t * used; // whether or not the keys are in use i.e. set
179
+ struct lm_ggml_tensor ** keys; // actual tensors in the set, keys[i] is only defined if lm_ggml_bitset_get(used, i)
180
+ };
181
+
182
+ struct lm_ggml_hash_set lm_ggml_hash_set_new(size_t size);
183
+ void lm_ggml_hash_set_free(struct lm_ggml_hash_set * hash_set);
184
+
185
+ // returns the minimum size for a hash set that can hold min_sz elements
186
+ size_t lm_ggml_hash_size(size_t min_sz);
187
+
188
+ // remove all elements from the hash set
189
+ void lm_ggml_hash_set_reset(struct lm_ggml_hash_set * hash_set);
190
+
191
+ // returns true if key is in the hash set
192
+ static bool lm_ggml_hash_contains(const struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key);
193
+
194
+ // returns LM_GGML_HASHSET_FULL if table is full, otherwise the current index of the key or where it should be inserted
195
+ static size_t lm_ggml_hash_find(const struct lm_ggml_hash_set * hash_set, const struct lm_ggml_tensor * key);
196
+
197
+ // returns LM_GGML_HASHSET_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
198
+ static size_t lm_ggml_hash_insert(struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key);
199
+
200
+ // return index, asserts if table is full
201
+ static size_t lm_ggml_hash_find_or_insert(struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key);
202
+
203
+ // hash function for lm_ggml_tensor
204
+ static inline size_t lm_ggml_hash(const struct lm_ggml_tensor * p) {
205
+ // the last 4 bits are always zero due to alignment
206
+ return (size_t)(uintptr_t)p >> 4;
207
+ }
208
+
209
+ static size_t lm_ggml_hash_find(const struct lm_ggml_hash_set * hash_set, const struct lm_ggml_tensor * key) {
210
+ size_t h = lm_ggml_hash(key) % hash_set->size;
211
+
212
+ // linear probing
213
+ size_t i = h;
214
+ while (lm_ggml_bitset_get(hash_set->used, i) && hash_set->keys[i] != key) {
215
+ i = (i + 1) % hash_set->size;
216
+ if (i == h) {
217
+ // visited all hash table entries -> not found
218
+ return LM_GGML_HASHSET_FULL;
219
+ }
220
+ }
221
+ return i;
222
+ }
223
+
224
+ static bool lm_ggml_hash_contains(const struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key) {
225
+ size_t i = lm_ggml_hash_find(hash_set, key);
226
+ return i != LM_GGML_HASHSET_FULL && lm_ggml_bitset_get(hash_set->used, i);
227
+ }
228
+
229
+ static size_t lm_ggml_hash_insert(struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key) {
230
+ size_t h = lm_ggml_hash(key) % hash_set->size;
231
+
232
+ // linear probing
233
+ size_t i = h;
234
+ do {
235
+ if (!lm_ggml_bitset_get(hash_set->used, i)) {
236
+ lm_ggml_bitset_set(hash_set->used, i);
237
+ hash_set->keys[i] = key;
238
+ return i;
239
+ }
240
+ if (hash_set->keys[i] == key) {
241
+ return LM_GGML_HASHSET_ALREADY_EXISTS;
242
+ }
243
+ i = (i + 1) % hash_set->size;
244
+ } while (i != h);
245
+
246
+ // visited all hash table entries -> not found
247
+ LM_GGML_ABORT("fatal error");
248
+ }
249
+
250
+ static size_t lm_ggml_hash_find_or_insert(struct lm_ggml_hash_set * hash_set, struct lm_ggml_tensor * key) {
251
+ size_t h = lm_ggml_hash(key) % hash_set->size;
252
+
253
+ // linear probing
254
+ size_t i = h;
255
+ do {
256
+ if (!lm_ggml_bitset_get(hash_set->used, i)) {
257
+ lm_ggml_bitset_set(hash_set->used, i);
258
+ hash_set->keys[i] = key;
259
+ return i;
260
+ }
261
+ if (hash_set->keys[i] == key) {
262
+ return i;
263
+ }
264
+ i = (i + 1) % hash_set->size;
265
+ } while (i != h);
266
+
267
+ // visited all hash table entries -> not found
268
+ LM_GGML_ABORT("fatal error");
269
+ }
270
+
271
+ // computation graph
272
+
273
// evaluation order for the nodes of a cgraph
enum lm_ggml_cgraph_eval_order {
    LM_GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0,
    LM_GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT,
    LM_GGML_CGRAPH_EVAL_ORDER_COUNT
};

// computation graph: arrays of tensor pointers plus a visited set;
// nodes/leafs/grads/grad_accs are parallel arrays of capacity `size`
struct lm_ggml_cgraph {
    int size;    // maximum number of nodes/leafs/grads/grad_accs
    int n_nodes; // number of nodes currently in use
    int n_leafs; // number of leafs currently in use

    struct lm_ggml_tensor ** nodes;     // tensors with data that can change if the graph is evaluated
    struct lm_ggml_tensor ** grads;     // the outputs of these tensors are the gradients of the nodes
    struct lm_ggml_tensor ** grad_accs; // accumulators for node gradients
    struct lm_ggml_tensor ** leafs;     // tensors with constant data

    struct lm_ggml_hash_set visited_hash_set; // dedupes tensors during graph construction

    enum lm_ggml_cgraph_eval_order order;
};

// returns a slice of cgraph with nodes [i0, i1)
// the slice does not have leafs or gradients
// if you need the gradients, get them from the original graph
struct lm_ggml_cgraph lm_ggml_graph_view(struct lm_ggml_cgraph * cgraph, int i0, int i1);
298
+
299
+ // Memory allocation
300
+
301
+ LM_GGML_API void * lm_ggml_aligned_malloc(size_t size);
302
+ LM_GGML_API void lm_ggml_aligned_free(void * ptr, size_t size);
303
+
304
+ // FP16 to FP32 conversion
305
+
306
+ // 16-bit float
307
+ // on Arm, we use __fp16
308
+ // on x86, we use uint16_t
309
+ //
310
+ // for old CUDA compilers (<= 11), we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/10616
311
+ // for MUSA compilers , we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/11843
312
+ //
313
+ #if defined(__ARM_NEON) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) && !defined(__MUSACC__)
314
+
315
+ // if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
316
+ //
317
+ // $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
318
+ //
319
+ #include <arm_neon.h>
320
+
321
+ #define LM_GGML_COMPUTE_FP16_TO_FP32(x) lm_ggml_compute_fp16_to_fp32(x)
322
+ #define LM_GGML_COMPUTE_FP32_TO_FP16(x) lm_ggml_compute_fp32_to_fp16(x)
323
+
324
+ #define LM_GGML_FP16_TO_FP32(x) lm_ggml_compute_fp16_to_fp32(x)
325
+
326
// fp16 -> fp32 via the native __fp16 type; memcpy avoids strict-aliasing UB
static inline float lm_ggml_compute_fp16_to_fp32(lm_ggml_fp16_t h) {
    __fp16 tmp;
    memcpy(&tmp, &h, sizeof(lm_ggml_fp16_t));
    return (float)tmp;
}

// fp32 -> fp16: the compiler emits the conversion for the __fp16 assignment
static inline lm_ggml_fp16_t lm_ggml_compute_fp32_to_fp16(float f) {
    lm_ggml_fp16_t res;
    __fp16 tmp = f;
    memcpy(&res, &tmp, sizeof(lm_ggml_fp16_t));
    return res;
}
338
+
339
+ #elif defined(__F16C__)
340
+
341
+ #ifdef _MSC_VER
342
+ #define LM_GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
343
+ #define LM_GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
344
+ #else
345
+ #define LM_GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
346
+ #define LM_GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
347
+ #endif
348
+
349
+ #elif defined(__POWER9_VECTOR__)
350
+
351
+ #define LM_GGML_COMPUTE_FP16_TO_FP32(x) lm_ggml_compute_fp16_to_fp32(x)
352
+ #define LM_GGML_COMPUTE_FP32_TO_FP16(x) lm_ggml_compute_fp32_to_fp16(x)
353
+ /* the inline asm below is about 12% faster than the lookup method */
354
+ #define LM_GGML_FP16_TO_FP32(x) LM_GGML_COMPUTE_FP16_TO_FP32(x)
355
+ #define LM_GGML_FP32_TO_FP16(x) LM_GGML_COMPUTE_FP32_TO_FP16(x)
356
+
357
// fp16 -> fp32 using POWER9 VSX: move half bits to a VSR, convert half->double, round to single
static inline float lm_ggml_compute_fp16_to_fp32(lm_ggml_fp16_t h) {
    float f;
    double d;
    __asm__(
        "mtfprd %0,%2\n"
        "xscvhpdp %0,%0\n"
        "frsp %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=f"(f):
        /* in */   "r"(h));
    return f;
}

// fp32 -> fp16 using POWER9 VSX: convert to half, then move the bits back to a GPR
static inline lm_ggml_fp16_t lm_ggml_compute_fp32_to_fp16(float f) {
    double d;
    lm_ggml_fp16_t r;
    __asm__( /* xscvdphp can work on double or single precision */
        "xscvdphp %0,%2\n"
        "mffprd %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=r"(r):
        /* in */   "f"(f));
    return r;
}
381
+
382
+ #elif defined(__riscv) && defined(LM_GGML_RV_ZFH)
383
+
384
// fp16 -> fp32 using the RISC-V Zfh extension: move half bits into an FP
// register, then widen to single precision
static inline float lm_ggml_compute_fp16_to_fp32(lm_ggml_fp16_t h) {
    float f;
    __asm__(
        "fmv.h.x %[f], %[h]\n\t"
        "fcvt.s.h %[f], %[f]"
        : [f] "=&f" (f)
        : [h] "r" (h)
    );
    return f;
}

// fp32 -> fp16 using Zfh: narrow to half, then move the bits to an integer register
static inline lm_ggml_fp16_t lm_ggml_compute_fp32_to_fp16(float f) {
    lm_ggml_fp16_t res;
    __asm__(
        "fcvt.h.s %[f], %[f]\n\t"
        "fmv.x.h %[h], %[f]"
        : [h] "=&r" (res)
        : [f] "f" (f)
    );
    return res;
}
405
+
406
+ #define LM_GGML_COMPUTE_FP16_TO_FP32(x) lm_ggml_compute_fp16_to_fp32(x)
407
+ #define LM_GGML_COMPUTE_FP32_TO_FP16(x) lm_ggml_compute_fp32_to_fp16(x)
408
+ #define LM_GGML_FP16_TO_FP32(x) LM_GGML_COMPUTE_FP16_TO_FP32(x)
409
+ #define LM_GGML_FP32_TO_FP16(x) LM_GGML_COMPUTE_FP32_TO_FP16(x)
410
+
411
+ #else
412
+
413
+ // FP16 <-> FP32
414
+ // ref: https://github.com/Maratyszcza/FP16
415
+
416
// reinterpret a 32-bit pattern as an IEEE binary32 value
// (memcpy is the well-defined way to type-pun; compilers elide the copy)
static inline float fp32_from_bits(uint32_t w) {
    float f;
    memcpy(&f, &w, sizeof(f));
    return f;
}

// reinterpret an IEEE binary32 value as its 32-bit pattern
static inline uint32_t fp32_to_bits(float f) {
    uint32_t w;
    memcpy(&w, &f, sizeof(w));
    return w;
}
433
+
434
// portable fp16 -> fp32 conversion using only integer/float bit manipulation
// (branchy subnormal handling is folded into a select at the end)
static inline float lm_ggml_compute_fp16_to_fp32(lm_ggml_fp16_t h) {
    const uint32_t w = (uint32_t) h << 16;          // fp16 bits moved to the top of a 32-bit word
    const uint32_t sign = w & UINT32_C(0x80000000); // isolated sign bit
    const uint32_t two_w = w + w;                   // shifts out the sign, leaving exponent+mantissa

    // rebase the fp16 exponent into fp32 range, then scale by 2^-112 to undo the offset
    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
    const float exp_scale = 0x1.0p-112f;            // hex float literal (C99 / C++17)
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000)); // same constant without hex-float support
#endif
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;

    // subnormal path: position the mantissa after an exponent of 126 and subtract the bias 0.5
    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;

    // values below this cutoff (exponent field zero in fp16) take the subnormal path
    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}
456
+
457
// portable fp32 -> fp16 conversion using only integer/float bit manipulation;
// rounding is achieved by letting float addition round the scaled value
static inline lm_ggml_fp16_t lm_ggml_compute_fp32_to_fp16(float f) {
#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
    const float scale_to_inf = 0x1.0p+112f;   // hex float literals (C99 / C++17)
    const float scale_to_zero = 0x1.0p-110f;
#else
    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));   // same constants without hex-float support
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    // scale up then down so that fp16-overflow becomes inf and rounding happens at fp16 precision
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w;                  // bits without the sign
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000);  // exponent field, used to align the rounding point
    if (bias < UINT32_C(0x71000000)) {
        bias = UINT32_C(0x71000000);                // clamp for subnormal results
    }

    // adding the bias constant shifts the mantissa so the fp16 result sits in known bit positions
    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);  // fp16 exponent field
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);     // fp16 mantissa plus carry
    const uint32_t nonsign = exp_bits + mantissa_bits;
    // inputs beyond fp32's finite range (inf/NaN) map to the canonical fp16 NaN 0x7E00
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}
482
+
483
+ #define LM_GGML_COMPUTE_FP16_TO_FP32(x) lm_ggml_compute_fp16_to_fp32(x)
484
+ #define LM_GGML_COMPUTE_FP32_TO_FP16(x) lm_ggml_compute_fp32_to_fp16(x)
485
+
486
+ #endif // defined(__ARM_NEON) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) && !defined(__MUSACC__)
487
+
488
+ // precomputed f32 table for f16 (256 KB)
489
+ // defined in ggml.c, initialized in lm_ggml_init()
490
+ LM_GGML_API float lm_ggml_table_f32_f16[1 << 16];
491
+
492
+ // On ARM NEON, it's quicker to directly convert x -> x instead of calling into lm_ggml_lookup_fp16_to_fp32,
493
+ // so we define LM_GGML_FP16_TO_FP32 and LM_GGML_FP32_TO_FP16 elsewhere for NEON.
494
+ // This is also true for POWER9.
495
+ #if !defined(LM_GGML_FP16_TO_FP32)
496
+ inline static float lm_ggml_lookup_fp16_to_fp32(lm_ggml_fp16_t f) {
497
+ uint16_t s;
498
+ memcpy(&s, &f, sizeof(uint16_t));
499
+ return lm_ggml_table_f32_f16[s];
500
+ }
501
+
502
+ #define LM_GGML_FP16_TO_FP32(x) lm_ggml_lookup_fp16_to_fp32(x)
503
+ #endif
504
+
505
+ #if !defined(LM_GGML_FP32_TO_FP16)
506
+ #define LM_GGML_FP32_TO_FP16(x) LM_GGML_COMPUTE_FP32_TO_FP16(x)
507
+ #endif
508
+
509
+ /**
510
+ * Converts brain16 to float32.
511
+ *
512
+ * The bfloat16 floating point format has the following structure:
513
+ *
514
+ * ┌sign
515
+ * │
516
+ * │ ┌exponent
517
+ * │ │
518
+ * │ │ ┌mantissa
519
+ * │ │ │
520
+ * │┌──┴───┐┌─┴───┐
521
+ * 0b0000000000000000 brain16
522
+ *
523
+ * Since bf16 has the same number of exponent bits as a 32bit float,
524
+ * encoding and decoding numbers becomes relatively straightforward.
525
+ *
526
+ * ┌sign
527
+ * │
528
+ * │ ┌exponent
529
+ * │ │
530
+ * │ │ ┌mantissa
531
+ * │ │ │
532
+ * │┌──┴───┐┌─┴───────────────────┐
533
+ * 0b00000000000000000000000000000000 IEEE binary32
534
+ *
535
+ * For comparison, the standard fp16 format has fewer exponent bits.
536
+ *
537
+ * ┌sign
538
+ * │
539
+ * │ ┌exponent
540
+ * │ │
541
+ * │ │ ┌mantissa
542
+ * │ │ │
543
+ * │┌─┴─┐┌─┴──────┐
544
+ * 0b0000000000000000 IEEE binary16
545
+ *
546
+ * @see IEEE 754-2008
547
+ */
548
+ static inline float lm_ggml_compute_bf16_to_fp32(lm_ggml_bf16_t h) {
549
+ union {
550
+ float f;
551
+ uint32_t i;
552
+ } u;
553
+ u.i = (uint32_t)h.bits << 16;
554
+ return u.f;
555
+ }
556
+
557
+ /**
558
+ * Converts float32 to brain16.
559
+ *
560
+ * This is binary identical with Google Brain float conversion.
561
+ * Floats shall round to nearest even, and NANs shall be quiet.
562
+ * Subnormals aren't flushed to zero, except perhaps when used.
563
+ * This code should vectorize nicely if using modern compilers.
564
+ */
565
+ static inline lm_ggml_bf16_t lm_ggml_compute_fp32_to_bf16(float s) {
566
+ lm_ggml_bf16_t h;
567
+ union {
568
+ float f;
569
+ uint32_t i;
570
+ } u;
571
+ u.f = s;
572
+ if ((u.i & 0x7fffffff) > 0x7f800000) { /* nan */
573
+ h.bits = (u.i >> 16) | 64; /* force to quiet */
574
+ return h;
575
+ }
576
+ h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16;
577
+ return h;
578
+ }
579
+
580
+ #define LM_GGML_FP32_TO_BF16(x) lm_ggml_compute_fp32_to_bf16(x)
581
+ #define LM_GGML_BF16_TO_FP32(x) lm_ggml_compute_bf16_to_fp32(x)
582
+
583
+ #ifdef __cplusplus
584
+ }
585
+ #endif
586
+
587
+ #ifdef __cplusplus
588
+ #include <vector>
589
+
590
+ // expose GGUF internals for test code
591
+ LM_GGML_API size_t lm_gguf_type_size(enum lm_gguf_type type);
592
+ LM_GGML_API struct lm_gguf_context * lm_gguf_init_from_file_impl(FILE * file, struct lm_gguf_init_params params);
593
+ LM_GGML_API void lm_gguf_write_to_buf(const struct lm_gguf_context * ctx, std::vector<int8_t> & buf, bool only_meta);
594
+ #endif // __cplusplus