cui-llama.rn 1.4.4 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (216)
  1. package/android/src/main/CMakeLists.txt +9 -2
  2. package/android/src/main/jni.cpp +54 -34
  3. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  4. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  5. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  9. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  10. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  11. package/cpp/binary-ops.cpp +158 -0
  12. package/cpp/binary-ops.h +16 -0
  13. package/cpp/chat.cpp +1769 -1085
  14. package/cpp/chat.h +143 -0
  15. package/cpp/common.cpp +1562 -1996
  16. package/cpp/common.h +677 -744
  17. package/cpp/cpu-common.h +72 -0
  18. package/cpp/ggml-alloc.c +1039 -1030
  19. package/cpp/ggml-alloc.h +1 -1
  20. package/cpp/ggml-backend-impl.h +255 -255
  21. package/cpp/ggml-backend-reg.cpp +586 -582
  22. package/cpp/ggml-backend.cpp +2004 -2002
  23. package/cpp/ggml-backend.h +354 -354
  24. package/cpp/ggml-common.h +1857 -1851
  25. package/cpp/ggml-cpp.h +39 -39
  26. package/cpp/ggml-cpu-aarch64.cpp +5725 -4247
  27. package/cpp/ggml-cpu-aarch64.h +8 -8
  28. package/cpp/ggml-cpu-impl.h +512 -380
  29. package/cpp/ggml-cpu-quants.c +13026 -11517
  30. package/cpp/ggml-cpu-traits.cpp +36 -36
  31. package/cpp/ggml-cpu-traits.h +38 -38
  32. package/cpp/ggml-cpu.c +3438 -14485
  33. package/cpp/ggml-cpu.cpp +655 -633
  34. package/cpp/ggml-cpu.h +138 -135
  35. package/cpp/ggml-impl.h +594 -567
  36. package/cpp/ggml-metal-impl.h +312 -3
  37. package/cpp/ggml-metal.h +66 -66
  38. package/cpp/ggml-metal.m +5360 -5002
  39. package/cpp/ggml-opt.cpp +854 -854
  40. package/cpp/ggml-opt.h +216 -216
  41. package/cpp/ggml-quants.c +5238 -5238
  42. package/cpp/ggml-threading.h +14 -14
  43. package/cpp/ggml.c +6618 -6524
  44. package/cpp/ggml.h +2222 -2194
  45. package/cpp/gguf.cpp +1330 -1329
  46. package/cpp/gguf.h +202 -202
  47. package/cpp/json-schema-to-grammar.cpp +1024 -1025
  48. package/cpp/json-schema-to-grammar.h +21 -22
  49. package/cpp/json.hpp +24766 -24766
  50. package/cpp/llama-adapter.cpp +382 -347
  51. package/cpp/llama-adapter.h +76 -74
  52. package/cpp/llama-arch.cpp +1714 -1492
  53. package/cpp/llama-arch.h +428 -402
  54. package/cpp/llama-batch.cpp +368 -368
  55. package/cpp/llama-batch.h +88 -88
  56. package/cpp/llama-chat.cpp +640 -587
  57. package/cpp/llama-chat.h +56 -53
  58. package/cpp/llama-context.cpp +2831 -1775
  59. package/cpp/llama-context.h +265 -128
  60. package/cpp/llama-cparams.cpp +1 -1
  61. package/cpp/llama-cparams.h +38 -37
  62. package/cpp/llama-cpp.h +30 -30
  63. package/cpp/llama-grammar.cpp +1219 -1219
  64. package/cpp/llama-grammar.h +173 -164
  65. package/cpp/llama-graph.cpp +1695 -0
  66. package/cpp/llama-graph.h +592 -0
  67. package/cpp/llama-hparams.cpp +79 -71
  68. package/cpp/llama-hparams.h +156 -139
  69. package/cpp/llama-impl.cpp +167 -167
  70. package/cpp/llama-impl.h +61 -61
  71. package/cpp/llama-io.cpp +15 -0
  72. package/cpp/llama-io.h +35 -0
  73. package/cpp/llama-kv-cache.cpp +1380 -718
  74. package/cpp/llama-kv-cache.h +213 -218
  75. package/cpp/llama-memory.cpp +1 -0
  76. package/cpp/llama-memory.h +21 -0
  77. package/cpp/llama-mmap.cpp +600 -590
  78. package/cpp/llama-mmap.h +68 -68
  79. package/cpp/llama-model-loader.cpp +1129 -1124
  80. package/cpp/llama-model-loader.h +169 -167
  81. package/cpp/llama-model.cpp +13080 -4023
  82. package/cpp/llama-model.h +409 -370
  83. package/cpp/llama-sampling.cpp +2563 -2525
  84. package/cpp/llama-sampling.h +32 -32
  85. package/cpp/llama-vocab.cpp +3295 -3252
  86. package/cpp/llama-vocab.h +125 -125
  87. package/cpp/llama.cpp +351 -10137
  88. package/cpp/llama.h +1434 -1340
  89. package/cpp/log.cpp +427 -423
  90. package/cpp/log.h +132 -132
  91. package/cpp/{chat-template.hpp → minja/chat-template.hpp} +537 -529
  92. package/cpp/{minja.hpp → minja/minja.hpp} +2941 -2883
  93. package/cpp/ops.cpp +8723 -0
  94. package/cpp/ops.h +128 -0
  95. package/cpp/rn-llama.cpp +45 -71
  96. package/cpp/rn-llama.h +3 -3
  97. package/cpp/sampling.cpp +573 -532
  98. package/cpp/sgemm.cpp +3043 -2598
  99. package/cpp/sgemm.h +14 -14
  100. package/cpp/simd-mappings.h +888 -0
  101. package/cpp/speculative.cpp +278 -277
  102. package/cpp/speculative.h +28 -28
  103. package/cpp/unary-ops.cpp +186 -0
  104. package/cpp/unary-ops.h +28 -0
  105. package/cpp/vec.cpp +258 -0
  106. package/cpp/vec.h +802 -0
  107. package/ios/CMakeLists.txt +5 -2
  108. package/ios/RNLlama.mm +2 -2
  109. package/ios/RNLlamaContext.mm +40 -24
  110. package/package.json +1 -1
  111. package/src/NativeRNLlama.ts +6 -4
  112. package/src/index.ts +3 -1
  113. package/android/src/main/build-arm64/CMakeCache.txt +0 -429
  114. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeCCompiler.cmake +0 -81
  115. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeCXXCompiler.cmake +0 -101
  116. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeDetermineCompilerABI_C.bin +0 -0
  117. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeDetermineCompilerABI_CXX.bin +0 -0
  118. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CMakeSystem.cmake +0 -15
  119. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdC/CMakeCCompilerId.c +0 -904
  120. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdC/CMakeCCompilerId.o +0 -0
  121. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdCXX/CMakeCXXCompilerId.cpp +0 -919
  122. package/android/src/main/build-arm64/CMakeFiles/3.31.4/CompilerIdCXX/CMakeCXXCompilerId.o +0 -0
  123. package/android/src/main/build-arm64/CMakeFiles/CMakeConfigureLog.yaml +0 -431
  124. package/android/src/main/build-arm64/CMakeFiles/CMakeDirectoryInformation.cmake +0 -16
  125. package/android/src/main/build-arm64/CMakeFiles/Makefile.cmake +0 -165
  126. package/android/src/main/build-arm64/CMakeFiles/Makefile2 +0 -297
  127. package/android/src/main/build-arm64/CMakeFiles/Progress/1 +0 -1
  128. package/android/src/main/build-arm64/CMakeFiles/Progress/2 +0 -1
  129. package/android/src/main/build-arm64/CMakeFiles/Progress/3 +0 -1
  130. package/android/src/main/build-arm64/CMakeFiles/Progress/4 +0 -1
  131. package/android/src/main/build-arm64/CMakeFiles/Progress/5 +0 -1
  132. package/android/src/main/build-arm64/CMakeFiles/Progress/6 +0 -1
  133. package/android/src/main/build-arm64/CMakeFiles/Progress/count.txt +0 -1
  134. package/android/src/main/build-arm64/CMakeFiles/TargetDirectories.txt +0 -8
  135. package/android/src/main/build-arm64/CMakeFiles/cmake.check_cache +0 -1
  136. package/android/src/main/build-arm64/CMakeFiles/progress.marks +0 -1
  137. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-alloc.c.o +0 -0
  138. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-alloc.c.o.d +0 -58
  139. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend-reg.cpp.o +0 -0
  140. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend-reg.cpp.o.d +0 -756
  141. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend.cpp.o +0 -0
  142. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-backend.cpp.o.d +0 -709
  143. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-aarch64.cpp.o +0 -0
  144. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-aarch64.cpp.o.d +0 -714
  145. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-quants.c.o +0 -0
  146. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-quants.c.o.d +0 -62
  147. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-traits.cpp.o +0 -0
  148. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu-traits.cpp.o.d +0 -708
  149. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.c.o +0 -0
  150. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.c.o.d +0 -113
  151. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.cpp.o +0 -0
  152. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-cpu.cpp.o.d +0 -713
  153. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-opt.cpp.o +0 -0
  154. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-opt.cpp.o.d +0 -763
  155. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-quants.c.o +0 -0
  156. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-quants.c.o.d +0 -61
  157. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-threading.cpp.o +0 -0
  158. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml-threading.cpp.o.d +0 -707
  159. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml.c.o +0 -0
  160. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/ggml.c.o.d +0 -104
  161. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/gguf.cpp.o +0 -0
  162. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/gguf.cpp.o.d +0 -714
  163. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/log.cpp.o +0 -0
  164. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/D_/dev/react-native/cui-llama.rn/cpp/log.cpp.o.d +0 -723
  165. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/DependInfo.cmake +0 -62
  166. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/build.make +0 -722
  167. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/cmake_clean.cmake +0 -89
  168. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/compiler_depend.make +0 -2
  169. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/compiler_depend.ts +0 -2
  170. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/depend.make +0 -2
  171. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/flags.make +0 -17
  172. package/android/src/main/build-arm64/CMakeFiles/rnllama.dir/progress.make +0 -41
  173. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/DependInfo.cmake +0 -62
  174. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/build.make +0 -722
  175. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/cmake_clean.cmake +0 -89
  176. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/compiler_depend.make +0 -2
  177. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/compiler_depend.ts +0 -2
  178. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/depend.make +0 -2
  179. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/flags.make +0 -17
  180. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8.dir/progress.make +0 -41
  181. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/DependInfo.cmake +0 -62
  182. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/build.make +0 -722
  183. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/cmake_clean.cmake +0 -89
  184. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/compiler_depend.make +0 -2
  185. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/compiler_depend.ts +0 -2
  186. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/depend.make +0 -2
  187. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/flags.make +0 -17
  188. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2.dir/progress.make +0 -41
  189. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/DependInfo.cmake +0 -62
  190. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/build.make +0 -722
  191. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/cmake_clean.cmake +0 -89
  192. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/compiler_depend.make +0 -2
  193. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/compiler_depend.ts +0 -2
  194. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/depend.make +0 -2
  195. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/flags.make +0 -17
  196. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod.dir/progress.make +0 -41
  197. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/DependInfo.cmake +0 -62
  198. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/build.make +0 -722
  199. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/cmake_clean.cmake +0 -89
  200. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/compiler_depend.make +0 -2
  201. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/compiler_depend.ts +0 -2
  202. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/depend.make +0 -2
  203. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/flags.make +0 -17
  204. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_dotprod_i8mm.dir/progress.make +0 -41
  205. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/DependInfo.cmake +0 -62
  206. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/build.make +0 -722
  207. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/cmake_clean.cmake +0 -89
  208. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/compiler_depend.make +0 -2
  209. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/compiler_depend.ts +0 -2
  210. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/depend.make +0 -2
  211. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/flags.make +0 -17
  212. package/android/src/main/build-arm64/CMakeFiles/rnllama_v8_2_i8mm.dir/progress.make +0 -41
  213. package/android/src/main/build-arm64/Makefile +0 -1862
  214. package/android/src/main/build-arm64/cmake_install.cmake +0 -66
  215. package/cpp/chat.hpp +0 -55
  216. package/cpp/rn-llama.hpp +0 -913
--- a/package/android/src/main/CMakeLists.txt
+++ b/package/android/src/main/CMakeLists.txt
@@ -19,6 +19,10 @@ set(
     ${RNLLAMA_LIB_DIR}/ggml-alloc.c
     ${RNLLAMA_LIB_DIR}/ggml-backend.cpp
     ${RNLLAMA_LIB_DIR}/ggml-backend-reg.cpp
+    ${RNLLAMA_LIB_DIR}/ops.cpp
+    ${RNLLAMA_LIB_DIR}/unary-ops.cpp
+    ${RNLLAMA_LIB_DIR}/binary-ops.cpp
+    ${RNLLAMA_LIB_DIR}/vec.cpp
     ${RNLLAMA_LIB_DIR}/ggml-cpu.c
     ${RNLLAMA_LIB_DIR}/ggml-cpu.cpp
     ${RNLLAMA_LIB_DIR}/ggml-cpu-aarch64.cpp
@@ -46,16 +50,19 @@ set(
     ${RNLLAMA_LIB_DIR}/llama-model-loader.cpp
     ${RNLLAMA_LIB_DIR}/llama-mmap.cpp
     ${RNLLAMA_LIB_DIR}/llama-vocab.cpp
+    ${RNLLAMA_LIB_DIR}/llama-memory.cpp
+    ${RNLLAMA_LIB_DIR}/llama-io.cpp
+    ${RNLLAMA_LIB_DIR}/llama-graph.cpp
     ${RNLLAMA_LIB_DIR}/sampling.cpp
     ${RNLLAMA_LIB_DIR}/unicode-data.cpp
     ${RNLLAMA_LIB_DIR}/unicode.cpp
     ${RNLLAMA_LIB_DIR}/sgemm.cpp
     ${RNLLAMA_LIB_DIR}/common.cpp
     ${RNLLAMA_LIB_DIR}/chat.cpp
-    ${RNLLAMA_LIB_DIR}/chat-template.hpp
     ${RNLLAMA_LIB_DIR}/json-schema-to-grammar.cpp
-    ${RNLLAMA_LIB_DIR}/minja.hpp
     ${RNLLAMA_LIB_DIR}/json.hpp
+    ${RNLLAMA_LIB_DIR}/minja/minja.hpp
+    ${RNLLAMA_LIB_DIR}/minja/chat-template.hpp
     ${RNLLAMA_LIB_DIR}/rn-llama.cpp
     ${CMAKE_SOURCE_DIR}/jni-utils.h
     ${CMAKE_SOURCE_DIR}/jni.cpp
--- a/package/android/src/main/jni.cpp
+++ b/package/android/src/main/jni.cpp
@@ -11,6 +11,7 @@
 #include <unordered_map>
 #include "json-schema-to-grammar.h"
 #include "llama.h"
+#include "chat.h"
 #include "llama-impl.h"
 #include "ggml.h"
 #include "rn-llama.h"
@@ -263,7 +264,7 @@ Java_com_rnllama_LlamaContext_initContext(
     }
 
     const char *model_path_chars = env->GetStringUTFChars(model_path_str, nullptr);
-    defaultParams.model = model_path_chars;
+    defaultParams.model = { model_path_chars };
 
     const char *chat_template_chars = env->GetStringUTFChars(chat_template, nullptr);
     defaultParams.chat_template = chat_template_chars;
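
Note on the `defaultParams.model` change above: upstream llama.cpp replaced the plain string `common_params::model` with a small struct, so the brace initializer now fills that struct's first member, the local file path. A minimal sketch of the assumed shape (the real definition lives in the updated common.h):

```cpp
// Sketch only — assumed shape of the struct behind the brace init above.
// With this layout, `defaultParams.model = { model_path_chars };`
// aggregate-initializes the `path` member and leaves the rest empty.
struct common_params_model {
    std::string path;    // local model file path (first member)
    std::string url;     // optional remote URL to download from
    std::string hf_repo; // optional Hugging Face repo
    std::string hf_file; // optional file name within the repo
};
```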
@@ -445,7 +446,7 @@ Java_com_rnllama_LlamaContext_loadModelDetails(
 
     auto default_caps = createWriteableMap(env);
 
-    auto default_tmpl = llama->templates.template_default.get();
+    auto default_tmpl = llama->templates.get()->template_default.get();
     auto default_tmpl_caps = default_tmpl->original_caps();
     putBoolean(env, default_caps, "tools", default_tmpl_caps.supports_tools);
     putBoolean(env, default_caps, "toolCalls", default_tmpl_caps.supports_tool_calls);
@@ -456,7 +457,7 @@ Java_com_rnllama_LlamaContext_loadModelDetails(
     putMap(env, minja, "defaultCaps", default_caps);
 
     putBoolean(env, minja, "toolUse", llama->validateModelChatTemplate(true, "tool_use"));
-    auto tool_use_tmpl = llama->templates.template_tool_use.get();
+    auto tool_use_tmpl = llama->templates.get()->template_tool_use.get();
     if (tool_use_tmpl != nullptr) {
         auto tool_use_caps = createWriteableMap(env);
         auto tool_use_tmpl_caps = tool_use_tmpl->original_caps();
@@ -510,15 +511,16 @@ Java_com_rnllama_LlamaContext_getFormattedChatWithJinja(
         parallel_tool_calls,
         tool_choice_chars
     );
-    putString(env, result, "prompt", formatted.prompt.get<std::string>().c_str());
+    putString(env, result, "prompt", formatted.prompt.c_str());
     putInt(env, result, "chat_format", static_cast<int>(formatted.format));
     putString(env, result, "grammar", formatted.grammar.c_str());
     putBoolean(env, result, "grammar_lazy", formatted.grammar_lazy);
     auto grammar_triggers = createWritableArray(env);
     for (const auto &trigger : formatted.grammar_triggers) {
         auto trigger_map = createWriteableMap(env);
-        putString(env, trigger_map, "word", trigger.word.c_str());
-        putBoolean(env, trigger_map, "at_start", trigger.at_start);
+        putInt(env, trigger_map, "type", trigger.type);
+        putString(env, trigger_map, "value", trigger.value.c_str());
+        putInt(env, trigger_map, "token", trigger.token);
         pushMap(env, grammar_triggers, trigger_map);
     }
     putArray(env, result, "grammar_triggers", grammar_triggers);
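
The per-trigger payload built above changes from a `word`/`at_start` pair to a typed `type`/`value`/`token` triple. A sketch of the trigger type these fields mirror, inferred from this diff (enumerators beyond TOKEN and WORD are assumptions based on upstream llama.cpp):

```cpp
// Inferred from the serialization above; not copied from this package.
enum common_grammar_trigger_type {
    COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN,         // fire on a specific token id
    COMMON_GRAMMAR_TRIGGER_TYPE_WORD,          // fire on a literal string
    COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,       // assumed: regex-style trigger
    COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_START, // assumed: pattern anchored at start
};

struct common_grammar_trigger {
    common_grammar_trigger_type type;
    std::string value; // serialized as "value" (previously "word")
    llama_token token; // only meaningful for TYPE_TOKEN triggers
};
```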
@@ -732,23 +734,53 @@ Java_com_rnllama_LlamaContext_doCompletion(
         sparams.grammar = grammar_chars;
     }
     sparams.grammar_lazy = grammar_lazy;
+
+    if (preserved_tokens != nullptr) {
+        int preserved_tokens_size = readablearray::size(env, preserved_tokens);
+        for (int i = 0; i < preserved_tokens_size; i++) {
+            jstring preserved_token = readablearray::getString(env, preserved_tokens, i);
+            auto ids = common_tokenize(llama->ctx, env->GetStringUTFChars(preserved_token, nullptr), /* add_special= */ false, /* parse_special= */ true);
+            if (ids.size() == 1) {
+                sparams.preserved_tokens.insert(ids[0]);
+            } else {
+                LOGI("[RNLlama] Not preserved because more than 1 token (wrong chat template override?): %s", env->GetStringUTFChars(preserved_token, nullptr));
+            }
+        }
+    }
+
     if (grammar_triggers != nullptr) {
         int grammar_triggers_size = readablearray::size(env, grammar_triggers);
         for (int i = 0; i < grammar_triggers_size; i++) {
-            common_grammar_trigger trigger;
             auto trigger_map = readablearray::getMap(env, grammar_triggers, i);
-            jstring trigger_word = readablemap::getString(env, trigger_map, "word", nullptr);
-            jboolean trigger_at_start = readablemap::getBool(env, trigger_map, "at_start", false);
-            trigger.word = env->GetStringUTFChars(trigger_word, nullptr);
-            trigger.at_start = trigger_at_start;
-
-            auto ids = common_tokenize(llama->ctx, trigger.word, /* add_special= */ false, /* parse_special= */ true);
-            if (ids.size() == 1) {
-                sparams.grammar_trigger_tokens.push_back(ids[0]);
-                sparams.preserved_tokens.insert(ids[0]);
-                continue;
+            const auto type = static_cast<common_grammar_trigger_type>(readablemap::getInt(env, trigger_map, "type", 0));
+            jstring trigger_word = readablemap::getString(env, trigger_map, "value", nullptr);
+            auto word = env->GetStringUTFChars(trigger_word, nullptr);
+
+            if (type == COMMON_GRAMMAR_TRIGGER_TYPE_WORD) {
+                auto ids = common_tokenize(llama->ctx, word, /* add_special= */ false, /* parse_special= */ true);
+                if (ids.size() == 1) {
+                    auto token = ids[0];
+                    if (std::find(sparams.preserved_tokens.begin(), sparams.preserved_tokens.end(), (llama_token) token) == sparams.preserved_tokens.end()) {
+                        throw std::runtime_error("Grammar trigger word should be marked as preserved token");
+                    }
+                    common_grammar_trigger trigger;
+                    trigger.type = COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN;
+                    trigger.value = word;
+                    trigger.token = token;
+                    sparams.grammar_triggers.push_back(std::move(trigger));
+                } else {
+                    sparams.grammar_triggers.push_back({COMMON_GRAMMAR_TRIGGER_TYPE_WORD, word});
+                }
+            } else {
+                common_grammar_trigger trigger;
+                trigger.type = type;
+                trigger.value = word;
+                if (type == COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN) {
+                    const auto token = (llama_token) readablemap::getInt(env, trigger_map, "token", 0);
+                    trigger.token = token;
+                }
+                sparams.grammar_triggers.push_back(std::move(trigger));
             }
-            sparams.grammar_trigger_words.push_back(trigger);
         }
     }
 
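
The preserved-token registration moves ahead of the trigger loop because a word trigger that tokenizes to a single token must now already be in `sparams.preserved_tokens` (otherwise a `std::runtime_error` is thrown). A hypothetical caller-side sketch of that contract:

```cpp
// Hypothetical illustration of the ordering the code above enforces:
// preserve the token first, then register the trigger that uses it.
auto ids = common_tokenize(llama->ctx, "<tool_call>",
                           /* add_special= */ false, /* parse_special= */ true);
if (ids.size() == 1) {
    sparams.preserved_tokens.insert(ids[0]); // step 1: mark as preserved
    sparams.grammar_triggers.push_back(      // step 2: add the trigger
        { COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN, "<tool_call>", ids[0] });
}
```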
@@ -759,18 +791,6 @@ Java_com_rnllama_LlamaContext_doCompletion(
     }
     env->ReleaseStringUTFChars(json_schema, json_schema_chars);
 
-    if (preserved_tokens != nullptr) {
-        int preserved_tokens_size = readablearray::size(env, preserved_tokens);
-        for (int i = 0; i < preserved_tokens_size; i++) {
-            jstring preserved_token = readablearray::getString(env, preserved_tokens, i);
-            auto ids = common_tokenize(llama->ctx, env->GetStringUTFChars(preserved_token, nullptr), /* add_special= */ false, /* parse_special= */ true);
-            if (ids.size() == 1) {
-                sparams.preserved_tokens.insert(ids[0]);
-            } else {
-                LOGI("[RNLlama] Not preserved because more than 1 token (wrong chat template override?): %s", env->GetStringUTFChars(preserved_token, nullptr));
-            }
-        }
-    }
 
     const llama_model * model = llama_get_model(llama->ctx);
     const llama_vocab * vocab = llama_model_get_vocab(model);
@@ -902,7 +922,7 @@ Java_com_rnllama_LlamaContext_doCompletion(
 
     auto toolCalls = createWritableArray(env);
     std::string reasoningContent = "";
-    std::string *content = nullptr;
+    std::string content;
     auto toolCallsSize = 0;
     if (!llama->is_interrupted) {
         try {
@@ -910,7 +930,7 @@ Java_com_rnllama_LlamaContext_doCompletion(
             if (!message.reasoning_content.empty()) {
                 reasoningContent = message.reasoning_content;
             }
-            content = &message.content;
+            content = message.content;
             for (const auto &tc : message.tool_calls) {
                 auto toolCall = createWriteableMap(env);
                 putString(env, toolCall, "type", "function");
@@ -931,8 +951,8 @@ Java_com_rnllama_LlamaContext_doCompletion(
 
     auto result = createWriteableMap(env);
     putString(env, result, "text", llama->generated_text.c_str());
-    if (content) {
-        putString(env, result, "content", content->c_str());
+    if (!content.empty()) {
+        putString(env, result, "content", content.c_str());
     }
     if (!reasoningContent.empty()) {
         putString(env, result, "reasoning_content", reasoningContent.c_str());
--- /dev/null
+++ b/package/cpp/binary-ops.cpp
@@ -0,0 +1,158 @@
+#include "binary-ops.h"
+
+#if defined(LM_GGML_USE_ACCELERATE)
+#include <Accelerate/Accelerate.h>
+
+using vDSP_fn_t = void (*)(const float *, vDSP_Stride, const float *, vDSP_Stride, float *, vDSP_Stride, vDSP_Length);
+#endif
+
+static inline float op_add(float a, float b) {
+    return a + b;
+}
+
+static inline float op_sub(float a, float b) {
+    return a - b;
+}
+
+static inline float op_mul(float a, float b) {
+    return a * b;
+}
+
+static inline float op_div(float a, float b) {
+    return a / b;
+}
+
+template <float (*op)(float, float), typename src0_t, typename src1_t, typename dst_t>
+static inline void vec_binary_op_contiguous(const int64_t n, dst_t * z, const src0_t * x, const src1_t * y) {
+    constexpr auto src0_to_f32 = type_conversion_table<src0_t>::to_f32;
+    constexpr auto src1_to_f32 = type_conversion_table<src1_t>::to_f32;
+    constexpr auto f32_to_dst  = type_conversion_table<dst_t >::from_f32;
+
+    for (int i = 0; i < n; i++) {
+        z[i] = f32_to_dst(op(src0_to_f32(x[i]), src1_to_f32(y[i])));
+    }
+}
+
+template <float (*op)(float, float), typename src0_t, typename src1_t, typename dst_t>
+static inline void vec_binary_op_non_contiguous(const int64_t n, const int64_t ne10, const int64_t nb10, dst_t * z, const src0_t * x, const src1_t * y) {
+    constexpr auto src0_to_f32 = type_conversion_table<src0_t>::to_f32;
+    constexpr auto src1_to_f32 = type_conversion_table<src1_t>::to_f32;
+    constexpr auto f32_to_dst  = type_conversion_table<dst_t >::from_f32;
+
+    for (int i = 0; i < n; i++) {
+        int i10 = i % ne10;
+        const src1_t * y_ptr = (const src1_t *)((const char *)y + i10*nb10);
+        z[i] = f32_to_dst(op(src0_to_f32(x[i]), src1_to_f32(*y_ptr)));
+    }
+}
+
+template <float (*op)(float, float), typename src0_t, typename src1_t, typename dst_t>
+static void apply_binary_op(const lm_ggml_compute_params * params, lm_ggml_tensor * dst) {
+    const lm_ggml_tensor * src0 = dst->src[0];
+    const lm_ggml_tensor * src1 = dst->src[1];
+
+    LM_GGML_ASSERT(lm_ggml_can_repeat(src1, src0) && lm_ggml_are_same_shape(src0, dst));
+
+    LM_GGML_TENSOR_BINARY_OP_LOCALS
+
+    LM_GGML_ASSERT( nb0 == sizeof(dst_t));
+    LM_GGML_ASSERT(nb00 == sizeof(src0_t));
+
+    const auto [ir0, ir1] = get_thread_range(params, src0);
+    const bool is_src1_contiguous = (nb10 == sizeof(src1_t));
+
+    if (!is_src1_contiguous) { // broadcast not implemented yet for non-contiguous
+        LM_GGML_ASSERT(lm_ggml_are_same_shape(src0, src1));
+    }
+
+#ifdef LM_GGML_USE_ACCELERATE
+    vDSP_fn_t vDSP_op = nullptr;
+    // TODO - avoid the f32-only check using type 'trait' lookup tables and row-based src-to-float conversion functions
+    if (src0->type == LM_GGML_TYPE_F32 && src1->type == LM_GGML_TYPE_F32 && dst->type == LM_GGML_TYPE_F32) {
+        if (op == op_add) {
+            vDSP_op = vDSP_vadd;
+        } else if (op == op_sub) {
+            vDSP_op = vDSP_vsub;
+        } else if (op == op_mul) {
+            vDSP_op = vDSP_vmul;
+        } else if (op == op_div) {
+            vDSP_op = vDSP_vdiv;
+        }
+    }
+#endif
+
+    for (int64_t ir = ir0; ir < ir1; ++ir) {
+        const int64_t i03 = ir/(ne02*ne01);
+        const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
+        const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
+
+        const int64_t i13 = i03 % ne13;
+        const int64_t i12 = i02 % ne12;
+        const int64_t i11 = i01 % ne11;
+
+        dst_t        * dst_ptr  = (dst_t        *) ((char *)       dst->data  + i03*nb3  + i02*nb2  + i01*nb1 );
+        const src0_t * src0_ptr = (const src0_t *) ((const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
+        const src1_t * src1_ptr = (const src1_t *) ((const char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11);
+
+        if (is_src1_contiguous) {
+            // src1 is broadcastable across src0 and dst in i1, i2, i3
+            const int64_t nr0 = ne00 / ne10;
+
+            for (int64_t r = 0; r < nr0; ++r) {
+#ifdef LM_GGML_USE_ACCELERATE
+                if constexpr (std::is_same_v<src0_t, float> && std::is_same_v<src1_t, float> && std::is_same_v<dst_t, float>) {
+                    if (vDSP_op != nullptr) {
+                        vDSP_op(src1_ptr, 1, src0_ptr + r*ne10, 1, dst_ptr + r*ne10, 1, ne10);
+                        continue;
+                    }
+                }
+#endif
+                vec_binary_op_contiguous<op>(ne10, dst_ptr + r*ne10, src0_ptr + r*ne10, src1_ptr);
+            }
+        } else {
+            vec_binary_op_non_contiguous<op>(ne0, ne10, nb10, dst_ptr, src0_ptr, src1_ptr);
+        }
+    }
+}
+
+// TODO: Use the 'traits' lookup table (for type conversion fns), instead of a mass of 'if' conditions with long templates
+template <float (*op)(float, float)>
+static void binary_op(const lm_ggml_compute_params * params, lm_ggml_tensor * dst) {
+    const lm_ggml_tensor * src0 = dst->src[0];
+    const lm_ggml_tensor * src1 = dst->src[1];
+
+    /* */ if (src0->type == LM_GGML_TYPE_F32 && src1->type == LM_GGML_TYPE_F32 && dst->type == LM_GGML_TYPE_F32) { // all f32
+        apply_binary_op<op, float, float, float>(params, dst);
+    } else if (src0->type == LM_GGML_TYPE_F16 && src1->type == LM_GGML_TYPE_F16 && dst->type == LM_GGML_TYPE_F16) { // all f16
+        apply_binary_op<op, lm_ggml_fp16_t, lm_ggml_fp16_t, lm_ggml_fp16_t>(params, dst);
+    } else if (src0->type == LM_GGML_TYPE_BF16 && src1->type == LM_GGML_TYPE_BF16 && dst->type == LM_GGML_TYPE_BF16) { // all bf16
+        apply_binary_op<op, lm_ggml_bf16_t, lm_ggml_bf16_t, lm_ggml_bf16_t>(params, dst);
+    } else if (src0->type == LM_GGML_TYPE_BF16 && src1->type == LM_GGML_TYPE_F32 && dst->type == LM_GGML_TYPE_BF16) {
+        apply_binary_op<op, lm_ggml_bf16_t, float, lm_ggml_bf16_t>(params, dst);
+    } else if (src0->type == LM_GGML_TYPE_BF16 && src1->type == LM_GGML_TYPE_F32 && dst->type == LM_GGML_TYPE_F32) {
+        apply_binary_op<op, lm_ggml_bf16_t, float, float>(params, dst);
+    } else if (src0->type == LM_GGML_TYPE_F16 && src1->type == LM_GGML_TYPE_F32 && dst->type == LM_GGML_TYPE_F16) {
+        apply_binary_op<op, lm_ggml_fp16_t, float, lm_ggml_fp16_t>(params, dst);
+    } else if (src0->type == LM_GGML_TYPE_F16 && src1->type == LM_GGML_TYPE_F32 && dst->type == LM_GGML_TYPE_F32) {
+        apply_binary_op<op, lm_ggml_fp16_t, float, float>(params, dst);
+    } else {
+        LM_GGML_ABORT("%s: unsupported types: dst: %s, src0: %s, src1: %s\n", __func__,
+            lm_ggml_type_name(dst->type), lm_ggml_type_name(src0->type), lm_ggml_type_name(src1->type));
+    }
+}
+
+void lm_ggml_compute_forward_add_non_quantized(const lm_ggml_compute_params * params, lm_ggml_tensor * dst) {
+    binary_op<op_add>(params, dst);
+}
+
+void lm_ggml_compute_forward_sub(const lm_ggml_compute_params * params, lm_ggml_tensor * dst) {
+    binary_op<op_sub>(params, dst);
+}
+
+void lm_ggml_compute_forward_mul(const lm_ggml_compute_params * params, lm_ggml_tensor * dst) {
+    binary_op<op_mul>(params, dst);
+}
+
+void lm_ggml_compute_forward_div(const lm_ggml_compute_params * params, lm_ggml_tensor * dst) {
+    binary_op<op_div>(params, dst);
+}
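
The templates above route every element through f32 via a `type_conversion_table` trait that this diff does not show (it is declared in the new cpu-common.h). A minimal sketch of the assumed shape, using the package's `lm_ggml_` symbol prefix:

```cpp
// Assumed shape of the conversion trait used by vec_binary_op_contiguous /
// vec_binary_op_non_contiguous above; the real specializations live in
// cpu-common.h. Each specialization pairs a to-f32 and a from-f32 function.
static inline float f32_identity(float x) { return x; }

template <typename T> struct type_conversion_table;

template <> struct type_conversion_table<float> {
    static constexpr float (*to_f32)(float)   = f32_identity;
    static constexpr float (*from_f32)(float) = f32_identity;
};

template <> struct type_conversion_table<lm_ggml_fp16_t> {
    static constexpr float          (*to_f32)(lm_ggml_fp16_t) = lm_ggml_fp16_to_fp32;
    static constexpr lm_ggml_fp16_t (*from_f32)(float)        = lm_ggml_fp32_to_fp16;
};
```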
--- /dev/null
+++ b/package/cpp/binary-ops.h
@@ -0,0 +1,16 @@
+#pragma once
+
+#include "cpu-common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void lm_ggml_compute_forward_add_non_quantized(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_sub(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_mul(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+void lm_ggml_compute_forward_div(const struct lm_ggml_compute_params * params, struct lm_ggml_tensor * dst);
+
+#ifdef __cplusplus
+}
+#endif