@fugood/llama.node 0.3.1 → 0.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (252)
  1. package/CMakeLists.txt +1 -8
  2. package/bin/darwin/arm64/llama-node.node +0 -0
  3. package/bin/darwin/x64/llama-node.node +0 -0
  4. package/bin/linux/arm64/llama-node.node +0 -0
  5. package/bin/linux/x64/llama-node.node +0 -0
  6. package/bin/linux-vulkan/arm64/llama-node.node +0 -0
  7. package/bin/linux-vulkan/x64/llama-node.node +0 -0
  8. package/bin/win32/arm64/llama-node.node +0 -0
  9. package/bin/win32/arm64/node.lib +0 -0
  10. package/bin/win32/x64/llama-node.node +0 -0
  11. package/bin/win32/x64/node.lib +0 -0
  12. package/bin/win32-vulkan/arm64/llama-node.node +0 -0
  13. package/bin/win32-vulkan/arm64/node.lib +0 -0
  14. package/bin/win32-vulkan/x64/llama-node.node +0 -0
  15. package/bin/win32-vulkan/x64/node.lib +0 -0
  16. package/package.json +4 -2
  17. package/src/DetokenizeWorker.cpp +1 -1
  18. package/src/EmbeddingWorker.cpp +2 -2
  19. package/src/LlamaCompletionWorker.cpp +10 -10
  20. package/src/LlamaCompletionWorker.h +2 -2
  21. package/src/LlamaContext.cpp +14 -17
  22. package/src/TokenizeWorker.cpp +1 -1
  23. package/src/common.hpp +5 -4
  24. package/src/llama.cpp/.github/workflows/build.yml +137 -29
  25. package/src/llama.cpp/.github/workflows/close-issue.yml +5 -0
  26. package/src/llama.cpp/.github/workflows/docker.yml +46 -34
  27. package/src/llama.cpp/.github/workflows/nix-ci-aarch64.yml +7 -0
  28. package/src/llama.cpp/.github/workflows/nix-ci.yml +7 -0
  29. package/src/llama.cpp/.github/workflows/python-check-requirements.yml +2 -4
  30. package/src/llama.cpp/.github/workflows/python-type-check.yml +3 -1
  31. package/src/llama.cpp/.github/workflows/server.yml +7 -0
  32. package/src/llama.cpp/CMakeLists.txt +26 -11
  33. package/src/llama.cpp/cmake/arm64-apple-clang.cmake +16 -0
  34. package/src/llama.cpp/common/CMakeLists.txt +10 -10
  35. package/src/llama.cpp/common/arg.cpp +2041 -0
  36. package/src/llama.cpp/common/arg.h +77 -0
  37. package/src/llama.cpp/common/common.cpp +523 -1861
  38. package/src/llama.cpp/common/common.h +234 -106
  39. package/src/llama.cpp/common/console.cpp +3 -0
  40. package/src/llama.cpp/common/json-schema-to-grammar.cpp +1 -1
  41. package/src/llama.cpp/common/log.cpp +401 -0
  42. package/src/llama.cpp/common/log.h +66 -698
  43. package/src/llama.cpp/common/ngram-cache.cpp +39 -36
  44. package/src/llama.cpp/common/ngram-cache.h +19 -19
  45. package/src/llama.cpp/common/sampling.cpp +356 -350
  46. package/src/llama.cpp/common/sampling.h +62 -139
  47. package/src/llama.cpp/common/stb_image.h +5990 -6398
  48. package/src/llama.cpp/docs/build.md +72 -17
  49. package/src/llama.cpp/examples/CMakeLists.txt +1 -2
  50. package/src/llama.cpp/examples/batched/batched.cpp +49 -65
  51. package/src/llama.cpp/examples/batched-bench/batched-bench.cpp +42 -53
  52. package/src/llama.cpp/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +55 -52
  53. package/src/llama.cpp/examples/cvector-generator/cvector-generator.cpp +22 -22
  54. package/src/llama.cpp/examples/cvector-generator/pca.hpp +3 -13
  55. package/src/llama.cpp/examples/embedding/embedding.cpp +147 -91
  56. package/src/llama.cpp/examples/eval-callback/eval-callback.cpp +37 -37
  57. package/src/llama.cpp/examples/export-lora/export-lora.cpp +39 -38
  58. package/src/llama.cpp/examples/gbnf-validator/gbnf-validator.cpp +14 -39
  59. package/src/llama.cpp/examples/{baby-llama → gen-docs}/CMakeLists.txt +2 -2
  60. package/src/llama.cpp/examples/gen-docs/gen-docs.cpp +83 -0
  61. package/src/llama.cpp/examples/gguf-split/gguf-split.cpp +58 -39
  62. package/src/llama.cpp/examples/gritlm/gritlm.cpp +46 -39
  63. package/src/llama.cpp/examples/imatrix/imatrix.cpp +75 -69
  64. package/src/llama.cpp/examples/infill/infill.cpp +131 -192
  65. package/src/llama.cpp/examples/llama-bench/llama-bench.cpp +276 -178
  66. package/src/llama.cpp/examples/llama.android/llama/build.gradle.kts +1 -0
  67. package/src/llama.cpp/examples/llama.android/llama/src/main/cpp/llama-android.cpp +40 -36
  68. package/src/llama.cpp/examples/llava/CMakeLists.txt +7 -0
  69. package/src/llama.cpp/examples/llava/clip.cpp +686 -150
  70. package/src/llama.cpp/examples/llava/clip.h +11 -2
  71. package/src/llama.cpp/examples/llava/llava-cli.cpp +60 -71
  72. package/src/llama.cpp/examples/llava/llava.cpp +146 -26
  73. package/src/llama.cpp/examples/llava/llava.h +2 -3
  74. package/src/llama.cpp/examples/llava/minicpmv-cli.cpp +323 -0
  75. package/src/llama.cpp/examples/llava/requirements.txt +1 -0
  76. package/src/llama.cpp/examples/lookahead/lookahead.cpp +55 -56
  77. package/src/llama.cpp/examples/lookup/lookup-create.cpp +15 -13
  78. package/src/llama.cpp/examples/lookup/lookup-merge.cpp +4 -4
  79. package/src/llama.cpp/examples/lookup/lookup-stats.cpp +34 -33
  80. package/src/llama.cpp/examples/lookup/lookup.cpp +60 -63
  81. package/src/llama.cpp/examples/main/main.cpp +216 -313
  82. package/src/llama.cpp/examples/parallel/parallel.cpp +58 -59
  83. package/src/llama.cpp/examples/passkey/passkey.cpp +53 -61
  84. package/src/llama.cpp/examples/perplexity/perplexity.cpp +277 -311
  85. package/src/llama.cpp/examples/quantize/CMakeLists.txt +1 -1
  86. package/src/llama.cpp/examples/quantize/quantize.cpp +27 -9
  87. package/src/llama.cpp/examples/quantize-stats/quantize-stats.cpp +12 -12
  88. package/src/llama.cpp/examples/retrieval/retrieval.cpp +57 -52
  89. package/src/llama.cpp/examples/rpc/rpc-server.cpp +27 -2
  90. package/src/llama.cpp/examples/save-load-state/save-load-state.cpp +60 -46
  91. package/src/llama.cpp/examples/server/CMakeLists.txt +7 -18
  92. package/src/llama.cpp/examples/server/server.cpp +1347 -1531
  93. package/src/llama.cpp/examples/server/tests/requirements.txt +2 -1
  94. package/src/llama.cpp/examples/server/utils.hpp +396 -107
  95. package/src/llama.cpp/examples/simple/CMakeLists.txt +1 -1
  96. package/src/llama.cpp/examples/simple/simple.cpp +132 -106
  97. package/src/llama.cpp/examples/simple-chat/CMakeLists.txt +5 -0
  98. package/src/llama.cpp/examples/simple-chat/simple-chat.cpp +197 -0
  99. package/src/llama.cpp/examples/speculative/speculative.cpp +153 -124
  100. package/src/llama.cpp/examples/sycl/run-llama2.sh +10 -19
  101. package/src/llama.cpp/examples/sycl/win-run-llama2.bat +1 -1
  102. package/src/llama.cpp/examples/tokenize/tokenize.cpp +27 -29
  103. package/src/llama.cpp/ggml/CMakeLists.txt +29 -12
  104. package/src/llama.cpp/ggml/include/ggml-alloc.h +3 -3
  105. package/src/llama.cpp/ggml/include/ggml-amx.h +25 -0
  106. package/src/llama.cpp/ggml/include/ggml-backend.h +166 -68
  107. package/src/llama.cpp/ggml/include/ggml-blas.h +5 -3
  108. package/src/llama.cpp/ggml/include/ggml-cann.h +17 -19
  109. package/src/llama.cpp/ggml/include/ggml-cpp.h +38 -0
  110. package/src/llama.cpp/ggml/include/ggml-cpu.h +177 -0
  111. package/src/llama.cpp/ggml/include/ggml-cuda.h +17 -17
  112. package/src/llama.cpp/ggml/include/ggml-kompute.h +7 -3
  113. package/src/llama.cpp/ggml/include/ggml-metal.h +13 -12
  114. package/src/llama.cpp/ggml/include/ggml-opt.h +216 -0
  115. package/src/llama.cpp/ggml/include/ggml-rpc.h +9 -5
  116. package/src/llama.cpp/ggml/include/ggml-sycl.h +18 -11
  117. package/src/llama.cpp/ggml/include/ggml-vulkan.h +10 -8
  118. package/src/llama.cpp/ggml/include/ggml.h +272 -505
  119. package/src/llama.cpp/ggml/src/CMakeLists.txt +69 -1110
  120. package/src/llama.cpp/ggml/src/ggml-aarch64.c +52 -2116
  121. package/src/llama.cpp/ggml/src/ggml-aarch64.h +0 -20
  122. package/src/llama.cpp/ggml/src/ggml-alloc.c +29 -27
  123. package/src/llama.cpp/ggml/src/ggml-amx/CMakeLists.txt +107 -0
  124. package/src/llama.cpp/ggml/src/ggml-amx/common.h +94 -0
  125. package/src/llama.cpp/ggml/src/ggml-amx/ggml-amx.cpp +446 -0
  126. package/src/llama.cpp/ggml/src/ggml-amx/mmq.cpp +2510 -0
  127. package/src/llama.cpp/ggml/src/ggml-amx/mmq.h +17 -0
  128. package/src/llama.cpp/ggml/src/ggml-backend-impl.h +144 -81
  129. package/src/llama.cpp/ggml/src/ggml-backend-reg.cpp +195 -0
  130. package/src/llama.cpp/ggml/src/{ggml-backend.c → ggml-backend.cpp} +394 -635
  131. package/src/llama.cpp/ggml/src/ggml-blas/CMakeLists.txt +91 -0
  132. package/src/llama.cpp/ggml/src/{ggml-blas.cpp → ggml-blas/ggml-blas.cpp} +217 -70
  133. package/src/llama.cpp/ggml/src/ggml-cann/CMakeLists.txt +46 -0
  134. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.cpp +4 -27
  135. package/src/llama.cpp/ggml/src/ggml-cann/acl_tensor.h +32 -4
  136. package/src/llama.cpp/ggml/src/ggml-cann/aclnn_ops.cpp +179 -41
  137. package/src/llama.cpp/ggml/src/ggml-cann/common.h +1 -0
  138. package/src/llama.cpp/ggml/src/{ggml-cann.cpp → ggml-cann/ggml-cann.cpp} +458 -353
  139. package/src/llama.cpp/ggml/src/ggml-cann/kernels/CMakeLists.txt +2 -1
  140. package/src/llama.cpp/ggml/src/ggml-cann/kernels/ascendc_kernels.h +2 -0
  141. package/src/llama.cpp/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +278 -0
  142. package/src/llama.cpp/ggml/src/ggml-common.h +20 -0
  143. package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +261 -0
  144. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.c +3560 -0
  145. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +30 -0
  146. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-impl.h +371 -0
  147. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.c +10822 -0
  148. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu-quants.h +63 -0
  149. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +13970 -0
  150. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +663 -0
  151. package/src/llama.cpp/ggml/src/ggml-cpu/llamafile/sgemm.cpp +1885 -0
  152. package/src/llama.cpp/ggml/src/ggml-cuda/CMakeLists.txt +155 -0
  153. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/cuda.h +14 -0
  154. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/hip.h +178 -0
  155. package/src/llama.cpp/ggml/src/ggml-cuda/vendors/musa.h +134 -0
  156. package/src/llama.cpp/ggml/src/ggml-hip/CMakeLists.txt +106 -0
  157. package/src/llama.cpp/ggml/src/ggml-impl.h +380 -584
  158. package/src/llama.cpp/ggml/src/ggml-kompute/CMakeLists.txt +162 -0
  159. package/src/llama.cpp/ggml/src/{ggml-kompute.cpp → ggml-kompute/ggml-kompute.cpp} +233 -87
  160. package/src/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt +108 -0
  161. package/src/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h +249 -0
  162. package/src/llama.cpp/ggml/src/ggml-musa/CMakeLists.txt +100 -0
  163. package/src/llama.cpp/ggml/src/ggml-opt.cpp +867 -0
  164. package/src/llama.cpp/ggml/src/ggml-quants.c +369 -9994
  165. package/src/llama.cpp/ggml/src/ggml-quants.h +78 -110
  166. package/src/llama.cpp/ggml/src/ggml-rpc/CMakeLists.txt +11 -0
  167. package/src/llama.cpp/ggml/src/{ggml-rpc.cpp → ggml-rpc/ggml-rpc.cpp} +560 -335
  168. package/src/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt +81 -0
  169. package/src/llama.cpp/ggml/src/ggml-sycl/backend.hpp +6 -0
  170. package/src/llama.cpp/ggml/src/ggml-sycl/common.cpp +51 -0
  171. package/src/llama.cpp/ggml/src/ggml-sycl/common.hpp +310 -0
  172. package/src/llama.cpp/ggml/src/ggml-sycl/concat.cpp +1 -0
  173. package/src/llama.cpp/ggml/src/ggml-sycl/conv.cpp +99 -0
  174. package/src/llama.cpp/ggml/src/ggml-sycl/conv.hpp +21 -0
  175. package/src/llama.cpp/ggml/src/ggml-sycl/convert.cpp +57 -57
  176. package/src/llama.cpp/ggml/src/ggml-sycl/convert.hpp +1 -1
  177. package/src/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp +106 -106
  178. package/src/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp +4 -4
  179. package/src/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp +18 -25
  180. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp +1011 -0
  181. package/src/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp +76 -0
  182. package/src/llama.cpp/ggml/src/ggml-sycl/gemm.hpp +101 -0
  183. package/src/llama.cpp/ggml/src/{ggml-sycl.cpp → ggml-sycl/ggml-sycl.cpp} +3350 -3980
  184. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.cpp +125 -0
  185. package/src/llama.cpp/ggml/src/ggml-sycl/im2col.hpp +23 -0
  186. package/src/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp +70 -68
  187. package/src/llama.cpp/ggml/src/ggml-sycl/norm.cpp +9 -6
  188. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.cpp +56 -0
  189. package/src/llama.cpp/ggml/src/ggml-sycl/outprod.hpp +11 -0
  190. package/src/llama.cpp/ggml/src/ggml-sycl/presets.hpp +8 -0
  191. package/src/llama.cpp/ggml/src/ggml-sycl/rope.cpp +1 -1
  192. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp +71 -0
  193. package/src/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp +21 -0
  194. package/src/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp +4 -4
  195. package/src/llama.cpp/ggml/src/ggml-sycl/wkv6.cpp +138 -0
  196. package/src/llama.cpp/ggml/src/ggml-sycl/wkv6.hpp +10 -0
  197. package/src/llama.cpp/ggml/src/ggml-threading.cpp +12 -0
  198. package/src/llama.cpp/ggml/src/ggml-threading.h +12 -0
  199. package/src/llama.cpp/ggml/src/ggml-vulkan/CMakeLists.txt +78 -0
  200. package/src/llama.cpp/ggml/src/{ggml-vulkan.cpp → ggml-vulkan/ggml-vulkan.cpp} +2034 -1718
  201. package/src/llama.cpp/ggml/src/{vulkan-shaders → ggml-vulkan/vulkan-shaders}/CMakeLists.txt +2 -0
  202. package/src/llama.cpp/ggml/src/{vulkan-shaders → ggml-vulkan/vulkan-shaders}/vulkan-shaders-gen.cpp +152 -185
  203. package/src/llama.cpp/ggml/src/ggml.c +2075 -16579
  204. package/src/llama.cpp/include/llama.h +296 -285
  205. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.inp +112 -0
  206. package/src/llama.cpp/models/ggml-vocab-chameleon.gguf.out +46 -0
  207. package/src/llama.cpp/pocs/vdot/q8dot.cpp +4 -3
  208. package/src/llama.cpp/pocs/vdot/vdot.cpp +8 -7
  209. package/src/llama.cpp/requirements/requirements-convert_legacy_llama.txt +1 -1
  210. package/src/llama.cpp/src/CMakeLists.txt +2 -1
  211. package/src/llama.cpp/src/llama-grammar.cpp +721 -122
  212. package/src/llama.cpp/src/llama-grammar.h +120 -15
  213. package/src/llama.cpp/src/llama-impl.h +156 -1
  214. package/src/llama.cpp/src/llama-sampling.cpp +2058 -346
  215. package/src/llama.cpp/src/llama-sampling.h +39 -47
  216. package/src/llama.cpp/src/llama-vocab.cpp +390 -127
  217. package/src/llama.cpp/src/llama-vocab.h +60 -20
  218. package/src/llama.cpp/src/llama.cpp +6215 -3263
  219. package/src/llama.cpp/src/unicode-data.cpp +6 -4
  220. package/src/llama.cpp/src/unicode-data.h +4 -4
  221. package/src/llama.cpp/src/unicode.cpp +15 -7
  222. package/src/llama.cpp/tests/CMakeLists.txt +4 -2
  223. package/src/llama.cpp/tests/test-arg-parser.cpp +131 -0
  224. package/src/llama.cpp/tests/test-backend-ops.cpp +1725 -297
  225. package/src/llama.cpp/tests/test-barrier.cpp +94 -0
  226. package/src/llama.cpp/tests/test-chat-template.cpp +9 -5
  227. package/src/llama.cpp/tests/test-grammar-integration.cpp +23 -38
  228. package/src/llama.cpp/tests/test-grammar-parser.cpp +6 -4
  229. package/src/llama.cpp/tests/test-json-schema-to-grammar.cpp +23 -8
  230. package/src/llama.cpp/tests/test-llama-grammar.cpp +9 -8
  231. package/src/llama.cpp/tests/test-log.cpp +39 -0
  232. package/src/llama.cpp/tests/test-opt.cpp +853 -142
  233. package/src/llama.cpp/tests/test-quantize-fns.cpp +28 -19
  234. package/src/llama.cpp/tests/test-quantize-perf.cpp +16 -14
  235. package/src/llama.cpp/tests/test-rope.cpp +2 -1
  236. package/src/llama.cpp/tests/test-sampling.cpp +226 -142
  237. package/src/llama.cpp/tests/test-tokenizer-0.cpp +56 -36
  238. package/src/llama.cpp/tests/test-tokenizer-1-bpe.cpp +5 -5
  239. package/src/llama.cpp/tests/test-tokenizer-1-spm.cpp +5 -5
  240. package/patches/llama.patch +0 -22
  241. package/src/llama.cpp/.github/workflows/bench.yml +0 -310
  242. package/src/llama.cpp/common/grammar-parser.cpp +0 -536
  243. package/src/llama.cpp/common/grammar-parser.h +0 -29
  244. package/src/llama.cpp/common/train.cpp +0 -1513
  245. package/src/llama.cpp/common/train.h +0 -233
  246. package/src/llama.cpp/examples/baby-llama/baby-llama.cpp +0 -1640
  247. package/src/llama.cpp/examples/benchmark/CMakeLists.txt +0 -6
  248. package/src/llama.cpp/examples/benchmark/benchmark-matmult.cpp +0 -275
  249. package/src/llama.cpp/ggml/src/llamafile/sgemm.cpp +0 -1027
  250. package/src/llama.cpp/tests/test-grad0.cpp +0 -1566
  251. /package/src/llama.cpp/ggml/{cmake → src/ggml-cpu/cmake}/FindSIMD.cmake +0 -0
  252. /package/src/llama.cpp/ggml/src/{llamafile → ggml-cpu/llamafile}/sgemm.h +0 -0
package/src/llama.cpp/include/llama.h:

@@ -2,6 +2,7 @@
  #define LLAMA_H

  #include "ggml.h"
+ #include "ggml-cpu.h"
  #include "ggml-backend.h"

  #include <stddef.h>
@@ -33,12 +34,15 @@

  #define LLAMA_DEFAULT_SEED 0xFFFFFFFF

+ // TODO: use everywhere in the implementation
+ #define LLAMA_TOKEN_NULL -1
+
  #define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
  #define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
  #define LLAMA_FILE_MAGIC_GGSQ 0x67677371u // 'ggsq'

  #define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
- #define LLAMA_SESSION_VERSION 8
+ #define LLAMA_SESSION_VERSION 9

  #define LLAMA_STATE_SEQ_MAGIC LLAMA_FILE_MAGIC_GGSQ
  #define LLAMA_STATE_SEQ_VERSION 2
@@ -53,8 +57,10 @@ extern "C" {
  // TODO: show sample usage
  //

+ // struct llama_vocab; // TODO: add in the future
  struct llama_model;
  struct llama_context;
+ struct llama_sampler;

  typedef int32_t llama_pos;
  typedef int32_t llama_token;
@@ -66,6 +72,7 @@ extern "C" {
  LLAMA_VOCAB_TYPE_BPE = 2, // GPT-2 tokenizer based on byte-level BPE
  LLAMA_VOCAB_TYPE_WPM = 3, // BERT tokenizer based on WordPiece
  LLAMA_VOCAB_TYPE_UGM = 4, // T5 tokenizer based on Unigram
+ LLAMA_VOCAB_TYPE_RWKV = 5, // RWKV tokenizer based on greedy tokenization
  };

  // pre-tokenization types
@@ -93,15 +100,16 @@ extern "C" {
  LLAMA_VOCAB_PRE_TYPE_TEKKEN = 20,
  LLAMA_VOCAB_PRE_TYPE_SMOLLM = 21,
  LLAMA_VOCAB_PRE_TYPE_CODESHELL = 22,
+ LLAMA_VOCAB_PRE_TYPE_BLOOM = 23,
+ LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH = 24,
+ LLAMA_VOCAB_PRE_TYPE_EXAONE = 25,
+ LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26,
  };

- // note: these values should be synchronized with ggml_rope
- // TODO: maybe move this enum to ggml.h (ggml_rope_type)
  enum llama_rope_type {
  LLAMA_ROPE_TYPE_NONE = -1,
- LLAMA_ROPE_TYPE_NORM = 0,
- LLAMA_ROPE_TYPE_NEOX = 2,
- LLAMA_ROPE_TYPE_GLM = 4,
+ LLAMA_ROPE_TYPE_NORM = 0,
+ LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX,
  };

  enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file
@@ -166,6 +174,8 @@ extern "C" {
  LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // except 1d tensors
  LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // except 1d tensors
  LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_TQ1_0 = 36, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_TQ2_0 = 37, // except 1d tensors

  LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
  };
@@ -184,6 +194,7 @@ extern "C" {
  LLAMA_POOLING_TYPE_MEAN = 1,
  LLAMA_POOLING_TYPE_CLS = 2,
  LLAMA_POOLING_TYPE_LAST = 3,
+ LLAMA_POOLING_TYPE_RANK = 4, // used by reranking models to attach the classification head to the graph
  };

  enum llama_attention_type {
@@ -193,11 +204,12 @@ extern "C" {
  };

  enum llama_split_mode {
- LLAMA_SPLIT_MODE_NONE = 0, // single GPU
- LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs
- LLAMA_SPLIT_MODE_ROW = 2, // split rows across GPUs
+ LLAMA_SPLIT_MODE_NONE = 0, // single GPU
+ LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs
+ LLAMA_SPLIT_MODE_ROW = 2, // split layers and KV across GPUs, use tensor parallelism if supported
  };

+ // TODO: simplify (https://github.com/ggerganov/llama.cpp/pull/9294#pullrequestreview-2286561979)
  typedef struct llama_token_data {
  llama_token id; // token id
  float logit; // log-odds of the token
@@ -205,8 +217,11 @@ extern "C" {
  } llama_token_data;

  typedef struct llama_token_data_array {
+ // TODO: consider SoA
+ // NOTE: this pointer can be modified by the samplers
  llama_token_data * data;
  size_t size;
+ int64_t selected; // this is the index in the data array (i.e. not the token id)
  bool sorted;
  } llama_token_data_array;

@@ -219,8 +234,11 @@ extern "C" {
  // - token : the token ids of the input (used when embd is NULL)
  // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL)
  // - pos : the positions of the respective token in the sequence
+ // (if set to NULL, the token position will be tracked automatically by llama_decode)
  // - seq_id : the sequence to which the respective token belongs
+ // (if set to NULL, the sequence ID will be assumed to be 0)
  // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output
+ // (if set to NULL, only the logits for last token will be returned)
  //
  typedef struct llama_batch {
  int32_t n_tokens;
@@ -231,15 +249,6 @@ extern "C" {
  int32_t * n_seq_id;
  llama_seq_id ** seq_id;
  int8_t * logits; // TODO: rename this to "output"
-
- // NOTE: helpers for smooth API transition - can be deprecated in the future
- // for future-proof code, use the above fields instead and ignore everything below
- //
- // pos[i] = all_pos_0 + i*all_pos_1
- //
- llama_pos all_pos_0; // used if pos == NULL
- llama_pos all_pos_1; // used if pos == NULL
- llama_seq_id all_seq_id; // used if seq_id == NULL
  } llama_batch;

  enum llama_model_kv_override_type {
@@ -266,10 +275,7 @@ extern "C" {
  int32_t n_gpu_layers; // number of layers to store in VRAM
  enum llama_split_mode split_mode; // how to split the model across multiple GPUs

- // main_gpu interpretation depends on split_mode:
- // LLAMA_SPLIT_NONE: the GPU that is used for the entire model
- // LLAMA_SPLIT_ROW: the GPU that is used for small tensors and intermediate results
- // LLAMA_SPLIT_LAYER: ignored
+ // the GPU that is used for the entire model when split_mode is LLAMA_SPLIT_MODE_NONE
  int32_t main_gpu;

  // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
@@ -299,13 +305,12 @@ extern "C" {
  // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
  // https://github.com/ggerganov/llama.cpp/pull/7544
  struct llama_context_params {
- uint32_t seed; // RNG seed, -1 for random
  uint32_t n_ctx; // text context, 0 = from model
  uint32_t n_batch; // logical maximum batch size that can be submitted to llama_decode
  uint32_t n_ubatch; // physical maximum batch size
  uint32_t n_seq_max; // max number of sequences (i.e. distinct states for recurrent models)
- uint32_t n_threads; // number of threads to use for generation
- uint32_t n_threads_batch; // number of threads to use for batch processing
+ int32_t n_threads; // number of threads to use for generation
+ int32_t n_threads_batch; // number of threads to use for batch processing

  enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
  enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id
@@ -327,11 +332,13 @@ extern "C" {
  enum ggml_type type_k; // data type for K cache [EXPERIMENTAL]
  enum ggml_type type_v; // data type for V cache [EXPERIMENTAL]

- // Keep the booleans together to avoid misalignment during copy-by-value.
+ // Keep the booleans together and at the end of the struct to avoid misalignment during copy-by-value.
+ // TODO: move at the end of the struct
  bool logits_all; // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
  bool embeddings; // if true, extract embeddings (together with logits)
  bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
  bool flash_attn; // whether to use flash attention [EXPERIMENTAL]
+ bool no_perf; // whether to measure performance timings

  // Abort callback
  // if it returns true, execution of llama_decode() will be aborted
@@ -345,7 +352,7 @@ extern "C" {
  int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
  enum llama_ftype ftype; // quantize to this llama_ftype
  enum ggml_type output_tensor_type; // output tensor type
- enum ggml_type token_embedding_type; // itoken embeddings tensor type
+ enum ggml_type token_embedding_type; // token embeddings tensor type
  bool allow_requantize; // allow quantizing non-f32/f16 tensors
  bool quantize_output_tensor; // quantize output.weight
  bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
@@ -355,56 +362,14 @@ extern "C" {
  void * kv_overrides; // pointer to vector containing overrides
  } llama_model_quantize_params;

- // grammar types
- struct llama_grammar;
-
- // grammar element type
- enum llama_gretype {
- // end of rule definition
- LLAMA_GRETYPE_END = 0,
-
- // start of alternate definition for rule
- LLAMA_GRETYPE_ALT = 1,
-
- // non-terminal element: reference to rule
- LLAMA_GRETYPE_RULE_REF = 2,
-
- // terminal element: character (code point)
- LLAMA_GRETYPE_CHAR = 3,
-
- // inverse char(s) ([^a], [^a-b] [^abc])
- LLAMA_GRETYPE_CHAR_NOT = 4,
-
- // modifies a preceding LLAMA_GRETYPE_CHAR or LLAMA_GRETYPE_CHAR_ALT to
- // be an inclusive range ([a-z])
- LLAMA_GRETYPE_CHAR_RNG_UPPER = 5,
-
- // modifies a preceding LLAMA_GRETYPE_CHAR or
- // LLAMA_GRETYPE_CHAR_RNG_UPPER to add an alternate char to match ([ab], [a-zA])
- LLAMA_GRETYPE_CHAR_ALT = 6,
-
- // any character (.)
- LLAMA_GRETYPE_CHAR_ANY = 7,
- };
-
- typedef struct llama_grammar_element {
- enum llama_gretype type;
- uint32_t value; // Unicode code point or rule ID
- } llama_grammar_element;
-
- // performance timing information
- struct llama_timings {
- double t_start_ms;
- double t_end_ms;
- double t_load_ms;
- double t_sample_ms;
- double t_p_eval_ms;
- double t_eval_ms;
+ typedef struct llama_logit_bias {
+ llama_token token;
+ float bias;
+ } llama_logit_bias;

- int32_t n_sample;
- int32_t n_p_eval;
- int32_t n_eval;
- };
+ typedef struct llama_sampler_chain_params {
+ bool no_perf; // whether to measure performance timings
+ } llama_sampler_chain_params;

  // used in chat template
  typedef struct llama_chat_message {
@@ -416,8 +381,10 @@ extern "C" {
  struct llama_lora_adapter;

  // Helpers for getting default parameters
- LLAMA_API struct llama_model_params llama_model_default_params(void);
- LLAMA_API struct llama_context_params llama_context_default_params(void);
+ // TODO: update API to start accepting pointers to params structs (https://github.com/ggerganov/llama.cpp/discussions/9172)
+ LLAMA_API struct llama_model_params llama_model_default_params(void);
+ LLAMA_API struct llama_context_params llama_context_default_params(void);
+ LLAMA_API struct llama_sampler_chain_params llama_sampler_chain_default_params(void);
  LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);

  // Initialize the llama + ggml backend
@@ -428,15 +395,23 @@ extern "C" {
  //optional:
  LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa);

+ // Optional: an auto threadpool gets created in ggml if not passed explicitly
+ LLAMA_API void llama_attach_threadpool(
+ struct llama_context * ctx,
+ ggml_threadpool_t threadpool,
+ ggml_threadpool_t threadpool_batch);
+ LLAMA_API void llama_detach_threadpool(struct llama_context * ctx);
+
  // Call once at the end of the program - currently only used for MPI
  LLAMA_API void llama_backend_free(void);

  LLAMA_API struct llama_model * llama_load_model_from_file(
  const char * path_model,
- struct llama_model_params params);
+ struct llama_model_params params);

  LLAMA_API void llama_free_model(struct llama_model * model);

+ // TODO: rename to llama_init_from_model
  LLAMA_API struct llama_context * llama_new_context_with_model(
  struct llama_model * model,
  struct llama_context_params params);
@@ -451,23 +426,24 @@ extern "C" {
  LLAMA_API bool llama_supports_mmap (void);
  LLAMA_API bool llama_supports_mlock (void);
  LLAMA_API bool llama_supports_gpu_offload(void);
-
- LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);
+ LLAMA_API bool llama_supports_rpc (void);

  LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx);
  LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx);
  LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx);
  LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx);

- LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx);
-
- LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model);
- LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model);
-
  LLAMA_API int32_t llama_n_vocab (const struct llama_model * model);
  LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model);
  LLAMA_API int32_t llama_n_embd (const struct llama_model * model);
  LLAMA_API int32_t llama_n_layer (const struct llama_model * model);
+ LLAMA_API int32_t llama_n_head (const struct llama_model * model);
+
+ LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);
+
+ LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx);
+ LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model);
+ LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model);

  // Get the model's RoPE frequency scaling factor
  LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);
@@ -504,10 +480,16 @@ extern "C" {
  // Returns true if the model contains an encoder that requires llama_encode() call
  LLAMA_API bool llama_model_has_encoder(const struct llama_model * model);

+ // Returns true if the model contains a decoder that requires llama_decode() call
+ LLAMA_API bool llama_model_has_decoder(const struct llama_model * model);
+
  // For encoder-decoder models, this function returns id of the token that must be provided
  // to the decoder to start generating output sequence. For other models, it returns -1.
  LLAMA_API llama_token llama_model_decoder_start_token(const struct llama_model * model);

+ // Returns true if the model is recurrent (like Mamba, RWKV, etc.)
+ LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model);
+
  // Returns 0 on success
  LLAMA_API uint32_t llama_model_quantize(
  const char * fname_inp,
@@ -690,7 +672,7 @@ extern "C" {
  //

  // Returns the *actual* size in bytes of the state
- // (rng, logits, embedding and kv_cache)
+ // (logits, embedding and kv_cache)
  // Only use when saving the state, not when restoring it, otherwise the size may be too small.
  LLAMA_API size_t llama_state_get_size(struct llama_context * ctx);
  LLAMA_API DEPRECATED(size_t llama_get_state_size(struct llama_context * ctx),
@@ -787,15 +769,15 @@ extern "C" {
  // Decoding
  //

- // Return batch for single sequence of tokens starting at pos_0
+ // Return batch for single sequence of tokens
+ // The sequence ID will be fixed to 0
+ // The position of the tokens will be tracked automatically by llama_decode
  //
  // NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it
  //
  LLAMA_API struct llama_batch llama_batch_get_one(
  llama_token * tokens,
- int32_t n_tokens,
- llama_pos pos_0,
- llama_seq_id seq_id);
+ int32_t n_tokens);

  // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
  // Each token can be assigned up to n_seq_max sequence ids
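Note on the hunk above: llama_batch_get_one() now takes only the token pointer and count; positions and the sequence id (fixed to 0) are tracked by llama_decode, and the all_pos_0/all_pos_1/all_seq_id helper fields were dropped from llama_batch in the -231,15 hunk earlier. The following is a minimal C++ sketch against the 0.3.3 headers, assuming an already-created llama_context; it is illustrative only and not part of the packaged sources:

    #include "llama.h"
    #include <vector>

    // hypothetical helper: evaluate an already-tokenized prompt on an existing context
    static bool eval_prompt(llama_context * ctx, std::vector<llama_token> & prompt_tokens) {
        // only the token pointer and count are passed now; positions and the
        // sequence id (0) are handled by llama_decode automatically
        llama_batch batch = llama_batch_get_one(prompt_tokens.data(), (int32_t) prompt_tokens.size());
        return llama_decode(ctx, batch) == 0;
    }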
@@ -815,7 +797,7 @@ extern "C" {
  // Processes a batch of tokens with the ecoder part of the encoder-decoder model.
  // Stores the encoder output internally for later use by the decoder cross-attention layers.
  // 0 - success
- // < 0 - error
+ // < 0 - error. the KV cache state is restored to the state before this call
  LLAMA_API int32_t llama_encode(
  struct llama_context * ctx,
  struct llama_batch batch);
@@ -823,7 +805,7 @@ extern "C" {
  // Positive return values does not mean a fatal error, but rather a warning.
  // 0 - success
  // 1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
- // < 0 - error
+ // < 0 - error. the KV cache state is restored to the state before this call
  LLAMA_API int32_t llama_decode(
  struct llama_context * ctx,
  struct llama_batch batch);
@@ -831,13 +813,13 @@ extern "C" {
  // Set the number of threads used for decoding
  // n_threads is the number of threads used for generation (single token)
  // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
- LLAMA_API void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch);
+ LLAMA_API void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch);

  // Get the number of threads used for generation of a single token.
- LLAMA_API uint32_t llama_n_threads(struct llama_context * ctx);
+ LLAMA_API int32_t llama_n_threads(struct llama_context * ctx);

  // Get the number of threads used for prompt and batch processing (multiple token).
- LLAMA_API uint32_t llama_n_threads_batch(struct llama_context * ctx);
+ LLAMA_API int32_t llama_n_threads_batch(struct llama_context * ctx);

  // Set whether the model is in embeddings mode or not
  // If true, embeddings will be returned but logits will not
@@ -885,7 +867,8 @@ extern "C" {

  // Get the embeddings for a sequence id
  // Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE
- // shape: [n_embd] (1-dimensional)
+ // when pooling_type == LLAMA_POOLING_TYPE_RANK, returns float[1] with the rank of the sequence
+ // otherwise: float[n_embd] (1-dimensional)
  LLAMA_API float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id);

  //
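Note on the hunk above: with the new LLAMA_POOLING_TYPE_RANK pooling mode, llama_get_embeddings_seq() returns a single float holding the sequence's rank score rather than an n_embd-sized vector. A hedged sketch of reading that score, assuming a context created with pooling_type set to LLAMA_POOLING_TYPE_RANK that has already decoded a query/document pair on sequence seq_id:

    // reranking: the per-sequence "embedding" is one float with the relevance score
    const float * score = llama_get_embeddings_seq(ctx, seq_id);
    if (score != NULL) {
        printf("rank score for seq %d: %f\n", (int) seq_id, score[0]);
    }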
@@ -907,26 +890,32 @@ extern "C" {
  // Special tokens
  LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence
  LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence
+ LLAMA_API llama_token llama_token_eot(const struct llama_model * model); // end-of-turn
  LLAMA_API llama_token llama_token_cls(const struct llama_model * model); // classification
  LLAMA_API llama_token llama_token_sep(const struct llama_model * model); // sentence separator
  LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line
  LLAMA_API llama_token llama_token_pad(const struct llama_model * model); // padding

- // Returns -1 if unknown, 1 for true or 0 for false.
- LLAMA_API int32_t llama_add_bos_token(const struct llama_model * model);
+ LLAMA_API bool llama_add_bos_token(const struct llama_model * model);
+ LLAMA_API bool llama_add_eos_token(const struct llama_model * model);

- // Returns -1 if unknown, 1 for true or 0 for false.
- LLAMA_API int32_t llama_add_eos_token(const struct llama_model * model);
+ // infill tokens
+ DEPRECATED(LLAMA_API llama_token llama_token_prefix(const struct llama_model * model), "use llama_token_fim_pre instead");
+ DEPRECATED(LLAMA_API llama_token llama_token_middle(const struct llama_model * model), "use llama_token_fim_mid instead");
+ DEPRECATED(LLAMA_API llama_token llama_token_suffix(const struct llama_model * model), "use llama_token_fim_suf instead");

- // Codellama infill tokens
- LLAMA_API llama_token llama_token_prefix(const struct llama_model * model); // Beginning of infill prefix
- LLAMA_API llama_token llama_token_middle(const struct llama_model * model); // Beginning of infill middle
- LLAMA_API llama_token llama_token_suffix(const struct llama_model * model); // Beginning of infill suffix
- LLAMA_API llama_token llama_token_eot (const struct llama_model * model); // End of infill middle
+ LLAMA_API llama_token llama_token_fim_pre(const struct llama_model * model);
+ LLAMA_API llama_token llama_token_fim_suf(const struct llama_model * model);
+ LLAMA_API llama_token llama_token_fim_mid(const struct llama_model * model);
+ LLAMA_API llama_token llama_token_fim_pad(const struct llama_model * model);
+ LLAMA_API llama_token llama_token_fim_rep(const struct llama_model * model);
+ LLAMA_API llama_token llama_token_fim_sep(const struct llama_model * model);

  //
  // Tokenization
  //
+ // The API is thread-safe.
+ //

  /// @details Convert the provided text into tokens.
  /// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
@@ -996,121 +985,117 @@ extern "C" {
  int32_t length);

  //
- // Grammar
+ // Sampling API
+ //
+ // Sample usage:
+ //
+ // // prepare the sampling chain at the start
+ // auto sparams = llama_sampler_chain_default_params();
+ //
+ // llama_sampler * smpl = llama_sampler_chain_init(sparams);
+ //
+ // llama_sampler_chain_add(smpl, llama_sampler_init_top_k(50));
+ // llama_sampler_chain_add(smpl, llama_sampler_init_top_p(0.9, 1));
+ // llama_sampler_chain_add(smpl, llama_sampler_init_temp (0.8));
+ //
+ // // typically, the chain should end with a sampler such as "greedy", "dist" or "mirostat"
+ // // this sampler will be responsible to select the actual token
+ // llama_sampler_chain_add(smpl, llama_sampler_init_dist(seed));
+ //
+ // ...
+ //
+ // // decoding loop:
+ // while (...) {
+ // ...
+ //
+ // llama_decode(ctx, batch);
+ //
+ // // sample from the logits of the last token in the batch
+ // const llama_token id = llama_sampler_sample(smpl, ctx, -1);
+ //
+ // // accepting the token updates the internal state of certain samplers (e.g. grammar, repetition, etc.)
+ // llama_sampler_accept(smpl, id);
+ // ...
+ // }
+ //
+ // llama_sampler_free(smpl);
+ //
+ // TODO: In the future, llama_sampler will be utilized to offload the sampling to the backends (e.g. GPU).
+ // TODO: in the future, the entire sampling API that uses llama_model should start using llama_vocab
  //

- /// Initialize a llama_grammar.
- ///
- /// @param rules The rule elements of the grammar to initialize.
- /// @param n_rules The number of rules.
- /// @param start_rule_index The index of the root rule (the starting point of the grammar).
- /// @return The initialized llama_grammar or nullptr if initialization failed.
- LLAMA_API struct llama_grammar * llama_grammar_init(
- const llama_grammar_element ** rules,
- size_t n_rules,
- size_t start_rule_index);
-
- LLAMA_API void llama_grammar_free(struct llama_grammar * grammar);
-
- LLAMA_API struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar);
-
- /// @details Apply constraints from grammar
- LLAMA_API void llama_grammar_sample(
- const struct llama_grammar * grammar,
- const struct llama_context * ctx,
- llama_token_data_array * candidates);
- LLAMA_API DEPRECATED(void llama_sample_grammar(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- const struct llama_grammar * grammar),
- "use llama_grammar_sample instead");
+ typedef void * llama_sampler_context_t;

- /// @details Accepts the sampled token into the grammar
- LLAMA_API void llama_grammar_accept_token(
- struct llama_grammar * grammar,
- struct llama_context * ctx,
- llama_token token);
+ // user code can implement the interface below in order to create custom llama_sampler
+ struct llama_sampler_i {
+ const char * (*name) (const struct llama_sampler * smpl); // can be NULL
+ void (*accept)( struct llama_sampler * smpl, llama_token token); // can be NULL
+ void (*apply) ( struct llama_sampler * smpl, llama_token_data_array * cur_p); // required
+ void (*reset) ( struct llama_sampler * smpl); // can be NULL
+ struct llama_sampler * (*clone) (const struct llama_sampler * smpl); // can be NULL if ctx is NULL
+ void (*free) ( struct llama_sampler * smpl); // can be NULL if ctx is NULL

- //
- // Sampling functions
- //
+ // TODO: API for internal libllama usage for appending the sampling to an existing ggml_cgraph
+ //void (*apply_ggml) (struct llama_sampler * smpl, ...);
+ };

- // Sets the current rng seed.
- LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);
+ struct llama_sampler {
+ struct llama_sampler_i * iface;
+ llama_sampler_context_t ctx;
+ };

- /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
- /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
- LLAMA_API void llama_sample_repetition_penalties(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- const llama_token * last_tokens,
- size_t penalty_last_n,
- float penalty_repeat,
- float penalty_freq,
- float penalty_present);
-
- /// @details Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806
- /// @param logits Logits extracted from the original generation context.
- /// @param logits_guidance Logits extracted from a separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context.
- /// @param scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance.
- LLAMA_API void llama_sample_apply_guidance(
- struct llama_context * ctx,
- float * logits,
- float * logits_guidance,
- float scale);
+ // mirror of llama_sampler_i:
+ LLAMA_API const char * llama_sampler_name (const struct llama_sampler * smpl);
+ LLAMA_API void llama_sampler_accept( struct llama_sampler * smpl, llama_token token);
+ LLAMA_API void llama_sampler_apply ( struct llama_sampler * smpl, llama_token_data_array * cur_p);
+ LLAMA_API void llama_sampler_reset ( struct llama_sampler * smpl);
+ LLAMA_API struct llama_sampler * llama_sampler_clone (const struct llama_sampler * smpl);
+ // important: do not free if the sampler has been added to a llama_sampler_chain (via llama_sampler_chain_add)
+ LLAMA_API void llama_sampler_free ( struct llama_sampler * smpl);
+
+ // llama_sampler_chain
+ // a type of llama_sampler that can chain multiple samplers one after another
+
+ LLAMA_API struct llama_sampler * llama_sampler_chain_init(struct llama_sampler_chain_params params);
+
+ // important: takes ownership of the sampler object and will free it when llama_sampler_free is called
+ LLAMA_API void llama_sampler_chain_add( struct llama_sampler * chain, struct llama_sampler * smpl);
+ LLAMA_API struct llama_sampler * llama_sampler_chain_get(const struct llama_sampler * chain, int32_t i);
+ LLAMA_API int llama_sampler_chain_n (const struct llama_sampler * chain);
+
+ // after removing a sampler, the chain will no longer own it, and it will not be freed when the chain is freed
+ LLAMA_API struct llama_sampler * llama_sampler_chain_remove( struct llama_sampler * chain, int32_t i);
+
+ // available samplers:
+
+ LLAMA_API struct llama_sampler * llama_sampler_init_greedy(void);
+ LLAMA_API struct llama_sampler * llama_sampler_init_dist (uint32_t seed);

  /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits.
- LLAMA_API void llama_sample_softmax(
- struct llama_context * ctx,
- llama_token_data_array * candidates);
+ /// NOTE: Avoid using on the full vocabulary as the sorting can become slow. For example, apply top-k or top-p sampling first.
+ DEPRECATED(LLAMA_API struct llama_sampler * llama_sampler_init_softmax (void),
+ "will be removed in the future (see https://github.com/ggerganov/llama.cpp/pull/9896#discussion_r1800920915)");

  /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
- LLAMA_API void llama_sample_top_k(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- int32_t k,
- size_t min_keep);
+ LLAMA_API struct llama_sampler * llama_sampler_init_top_k (int32_t k);

  /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
- LLAMA_API void llama_sample_top_p(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- float p,
- size_t min_keep);
+ LLAMA_API struct llama_sampler * llama_sampler_init_top_p (float p, size_t min_keep);

  /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841
- LLAMA_API void llama_sample_min_p(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- float p,
- size_t min_keep);
-
- /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
- LLAMA_API void llama_sample_tail_free(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- float z,
- size_t min_keep);
+ LLAMA_API struct llama_sampler * llama_sampler_init_min_p (float p, size_t min_keep);

  /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
- LLAMA_API void llama_sample_typical(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- float p,
- size_t min_keep);
+ LLAMA_API struct llama_sampler * llama_sampler_init_typical (float p, size_t min_keep);

- /// @details Dynamic temperature implementation described in the paper https://arxiv.org/abs/2309.02772.
- LLAMA_API void llama_sample_entropy(
- struct llama_context * ctx,
- llama_token_data_array * candidates_p,
- float min_temp,
- float max_temp,
- float exponent_val);
+ /// #details Updates the logits l_i` = l_i/t. When t <= 0.0f, the maximum logit is kept at it's original value, the rest are set to -inf
+ LLAMA_API struct llama_sampler * llama_sampler_init_temp (float t);

- LLAMA_API void llama_sample_temp(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- float temp);
+ /// @details Dynamic temperature implementation (a.k.a. entropy) described in the paper https://arxiv.org/abs/2309.02772.
+ LLAMA_API struct llama_sampler * llama_sampler_init_temp_ext (float t, float delta, float exponent);
+
+ /// @details XTC sampler as described in https://github.com/oobabooga/text-generation-webui/pull/6335
+ LLAMA_API struct llama_sampler * llama_sampler_init_xtc (float p, float t, size_t min_keep, uint32_t seed);

  /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
  /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
@@ -1118,36 +1103,94 @@ extern "C" {
  /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
  /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
  /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
- LLAMA_API llama_token llama_sample_token_mirostat(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- float tau,
- float eta,
- int32_t m,
- float * mu);
+ LLAMA_API struct llama_sampler * llama_sampler_init_mirostat(
+ int32_t n_vocab,
+ uint32_t seed,
+ float tau,
+ float eta,
+ int32_t m);

  /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
  /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
  /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
  /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
  /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
- LLAMA_API llama_token llama_sample_token_mirostat_v2(
- struct llama_context * ctx,
- llama_token_data_array * candidates,
- float tau,
- float eta,
- float * mu);
-
- /// @details Selects the token with the highest probability.
- /// Does not compute the token probabilities. Use llama_sample_softmax() instead.
- LLAMA_API llama_token llama_sample_token_greedy(
- struct llama_context * ctx,
- llama_token_data_array * candidates);
+ LLAMA_API struct llama_sampler * llama_sampler_init_mirostat_v2(
+ uint32_t seed,
+ float tau,
+ float eta);
+
+ LLAMA_API struct llama_sampler * llama_sampler_init_grammar(
+ const struct llama_model * model,
+ const char * grammar_str,
+ const char * grammar_root);
+
+ LLAMA_API struct llama_sampler * llama_sampler_init_penalties(
+ int32_t n_vocab, // llama_n_vocab()
+ llama_token special_eos_id, // llama_token_eos()
+ llama_token linefeed_id, // llama_token_nl()
+ int32_t penalty_last_n, // last n tokens to penalize (0 = disable penalty, -1 = context size)
+ float penalty_repeat, // 1.0 = disabled
+ float penalty_freq, // 0.0 = disabled
+ float penalty_present, // 0.0 = disabled
+ bool penalize_nl, // consider newlines as a repeatable token
+ bool ignore_eos); // ignore the end-of-sequence token
+
+ /// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982
+ LLAMA_API struct llama_sampler * llama_sampler_init_dry(
+ const struct llama_model * model,
+ float dry_multiplier,
+ float dry_base,
+ int32_t dry_allowed_length,
+ int32_t dry_penalty_last_n,
+ const char ** seq_breakers,
+ size_t num_breakers);
+
+ LLAMA_API struct llama_sampler * llama_sampler_init_logit_bias(
+ int32_t n_vocab,
+ int32_t n_logit_bias,
+ const llama_logit_bias * logit_bias);
+
+ // this sampler is meant to be used for fill-in-the-middle infilling
+ // it's supposed to be used after top_k + top_p sampling
+ //
+ // 1. if the sum of the EOG probs times the number of candidates is higher than the sum of the other probs -> pick EOG
+ // 2. combine probs of tokens that have the same prefix
+ //
+ // example:
+ //
+ // - before:
+ // "hel": 0.5
+ // "hell": 0.2
+ // "hello": 0.1
+ // "dummy": 0.1
+ //
+ // - after:
+ // "hel": 0.8
+ // "dummy": 0.1
+ //
+ // 3. discard non-EOG tokens with low prob
+ // 4. if no tokens are left -> pick EOT
+ //
+ LLAMA_API struct llama_sampler * llama_sampler_init_infill(const struct llama_model * model);

- /// @details Randomly selects a token from the candidates based on their probabilities using the RNG of ctx.
- LLAMA_API llama_token llama_sample_token(
- struct llama_context * ctx,
- llama_token_data_array * candidates);
+ // Returns the seed used by the sampler if applicable, LLAMA_DEFAULT_SEED otherwise
+ LLAMA_API uint32_t llama_sampler_get_seed(const struct llama_sampler * smpl);
+
+ /// @details Sample and accept a token from the idx-th output of the last evaluation
+ //
+ // Shorthand for:
+ // const auto * logits = llama_get_logits_ith(ctx, idx);
+ // llama_token_data_array cur_p = { ... init from logits ... };
+ // llama_sampler_apply(smpl, &cur_p);
+ // auto token = cur_p.data[cur_p.selected].id;
+ // llama_sampler_accept(smpl, token);
+ // return token;
+ // Returns the sampled token
+ LLAMA_API llama_token llama_sampler_sample(struct llama_sampler * smpl, struct llama_context * ctx, int32_t idx);
+
+ // TODO: extend in the future
+ //LLAMA_API void llama_decode_with_sampler(struct llama_context * ctx, struct llama_sampler * smpl, struct llama_batch batch, ...);

  //
  // Model split
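Note on the two hunks above: the llama_sample_* functions, llama_set_rng_seed and the public grammar API are replaced by llama_sampler objects composed into a chain and queried with llama_sampler_sample(). The sketch below is a C++ generation loop against the 0.3.3 header, following the sample usage documented in the header itself; model and context creation are assumed to have happened already, and llama_token_is_eog / llama_token_to_piece are existing llama.h helpers that are not part of this diff. It is illustrative only:

    #include "llama.h"
    #include <string>

    // hypothetical helper: generate up to n_predict tokens after "batch" has been
    // filled with the prompt (e.g. via llama_batch_get_one)
    static std::string generate(llama_model * model, llama_context * ctx, llama_batch batch, int n_predict) {
        llama_sampler_chain_params sparams = llama_sampler_chain_default_params();
        llama_sampler * smpl = llama_sampler_chain_init(sparams);

        llama_sampler_chain_add(smpl, llama_sampler_init_top_k(40));
        llama_sampler_chain_add(smpl, llama_sampler_init_top_p(0.9f, 1));
        llama_sampler_chain_add(smpl, llama_sampler_init_temp(0.8f));
        llama_sampler_chain_add(smpl, llama_sampler_init_dist(LLAMA_DEFAULT_SEED)); // final token selection

        std::string out;
        llama_token id = 0; // kept outside the loop so &id stays valid for the next batch
        for (int i = 0; i < n_predict; ++i) {
            if (llama_decode(ctx, batch) != 0) {
                break;
            }
            id = llama_sampler_sample(smpl, ctx, -1); // samples and accepts from the last output
            if (llama_token_is_eog(model, id)) {
                break;
            }
            char buf[128];
            int32_t n = llama_token_to_piece(model, id, buf, sizeof(buf), 0, true);
            if (n > 0) {
                out.append(buf, n);
            }
            batch = llama_batch_get_one(&id, 1); // feed the sampled token back in
        }

        llama_sampler_free(smpl);
        return out;
    }

The RNG seed now lives on the sampler (llama_sampler_init_dist / llama_sampler_get_seed) rather than in llama_context_params, which is why the seed field disappears from the context-params hunk earlier in this file.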
@@ -1163,12 +1206,6 @@ extern "C" {
  // Returns the split_prefix length.
  LLAMA_API int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count);

- // Performance information
- LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
-
- LLAMA_API void llama_print_timings(struct llama_context * ctx);
- LLAMA_API void llama_reset_timings(struct llama_context * ctx);
-
  // Print system information
  LLAMA_API const char * llama_print_system_info(void);

@@ -1176,65 +1213,39 @@ extern "C" {
  // If this is not called, or NULL is supplied, everything is output on stderr.
  LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data);

- LLAMA_API void llama_dump_timing_info_yaml(FILE * stream, const struct llama_context * ctx);
-
- #ifdef __cplusplus
- }
- #endif
-
- // Internal API to be implemented by llama.cpp and used by tests/benchmarks only
- #ifdef LLAMA_API_INTERNAL
-
- #include <random>
- #include <string>
- #include <vector>
-
- struct ggml_tensor;
-
- const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
- struct llama_context * ctx
- );
-
- struct llama_partial_utf8 {
- uint32_t value; // bit value so far (unshifted)
- int n_remain; // num bytes remaining; -1 indicates invalid sequence
- };
-
- struct llama_grammar_candidate {
- size_t index;
- const uint32_t * code_points;
- llama_partial_utf8 partial_utf8;
- };
-
- using llama_grammar_rule = std::vector< llama_grammar_element>;
- using llama_grammar_stack = std::vector<const llama_grammar_element *>;
+ //
+ // Performance utils
+ //
+ // NOTE: Used by llama.cpp examples, avoid using in third-party apps. Instead, do your own performance measurements.
+ //

- using llama_grammar_rules = std::vector<llama_grammar_rule>;
- using llama_grammar_stacks = std::vector<llama_grammar_stack>;
- using llama_grammar_candidates = std::vector<llama_grammar_candidate>;
+ struct llama_perf_context_data {
+ double t_start_ms;
+ double t_load_ms;
+ double t_p_eval_ms;
+ double t_eval_ms;

- const llama_grammar_rules & llama_grammar_get_rules (const struct llama_grammar * grammar);
- llama_grammar_stacks & llama_grammar_get_stacks( struct llama_grammar * grammar);
+ int32_t n_p_eval;
+ int32_t n_eval;
+ };

- void llama_grammar_accept(
- const llama_grammar_rules & rules,
- const llama_grammar_stacks & stacks,
- const uint32_t chr,
- llama_grammar_stacks & new_stacks);
+ struct llama_perf_sampler_data {
+ double t_sample_ms;

- std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
- const llama_grammar_rules & rules,
- const llama_grammar_stack & stack,
- const llama_grammar_candidates & candidates);
+ int32_t n_sample;
+ };

- std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
- const std::string & src,
- llama_partial_utf8 partial_start);
+ LLAMA_API struct llama_perf_context_data llama_perf_context (const struct llama_context * ctx);
+ LLAMA_API void llama_perf_context_print(const struct llama_context * ctx);
+ LLAMA_API void llama_perf_context_reset( struct llama_context * ctx);

- // Randomly selects a token from the candidates based on their probabilities using given std::mt19937.
- // This is a temporary workaround in order to fix race conditions when sampling with multiple sequences.
- llama_token llama_sample_token_with_rng(struct llama_context * ctx, llama_token_data_array * candidates, std::mt19937 & rng);
+ // NOTE: the following work only with samplers constructed via llama_sampler_chain_init
+ LLAMA_API struct llama_perf_sampler_data llama_perf_sampler (const struct llama_sampler * chain);
+ LLAMA_API void llama_perf_sampler_print(const struct llama_sampler * chain);
+ LLAMA_API void llama_perf_sampler_reset( struct llama_sampler * chain);

- #endif // LLAMA_API_INTERNAL
+ #ifdef __cplusplus
+ }
+ #endif

  #endif // LLAMA_H
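Note on the final hunks: llama_get_timings, llama_print_timings, llama_reset_timings, llama_dump_timing_info_yaml and the LLAMA_API_INTERNAL block are removed; timing now goes through the llama_perf_* utilities, with sampler timings attached to chains built via llama_sampler_chain_init. A short illustrative fragment, reusing the smpl and ctx names from the generation sketch above (add <cstdio> for fprintf):

    // replaces the old llama_print_timings(ctx)
    llama_perf_sampler_print(smpl); // only valid for samplers from llama_sampler_chain_init
    llama_perf_context_print(ctx);

    // or read the raw counters
    llama_perf_context_data pd = llama_perf_context(ctx);
    fprintf(stderr, "eval: %d tokens in %.2f ms\n", pd.n_eval, pd.t_eval_ms);

    llama_perf_context_reset(ctx);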