whispercpp 1.3.1 → 1.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (797)
  1. checksums.yaml +4 -4
  2. data/.gitignore +4 -3
  3. data/README.md +92 -31
  4. data/Rakefile +26 -7
  5. data/ext/.gitignore +5 -7
  6. data/ext/dependencies.rb +61 -0
  7. data/ext/extconf.rb +21 -198
  8. data/ext/options.rb +221 -0
  9. data/ext/ruby_whisper.c +159 -0
  10. data/ext/ruby_whisper.h +17 -2
  11. data/ext/ruby_whisper_context.c +641 -0
  12. data/ext/ruby_whisper_error.c +52 -0
  13. data/ext/ruby_whisper_model.c +232 -0
  14. data/ext/ruby_whisper_params.c +1301 -0
  15. data/ext/ruby_whisper_segment.c +143 -0
  16. data/ext/ruby_whisper_transcribe.cpp +87 -0
  17. data/ext/ruby_whisper_vad_params.c +288 -0
  18. data/ext/sources/.dockerignore +3 -0
  19. data/ext/sources/.github/workflows/bindings-ruby.yml +21 -0
  20. data/ext/sources/CMakeGraphVizOptions.cmake +8 -0
  21. data/ext/sources/CMakeLists.txt +251 -0
  22. data/ext/sources/bindings/javascript/CMakeLists.txt +41 -0
  23. data/ext/sources/bindings/javascript/emscripten.cpp +93 -0
  24. data/ext/sources/bindings/javascript/libwhisper.worker.js +1 -0
  25. data/ext/sources/bindings/javascript/package-tmpl.json +26 -0
  26. data/ext/sources/bindings/javascript/package.json +26 -0
  27. data/ext/sources/bindings/javascript/whisper.js +19 -0
  28. data/ext/sources/build-xcframework.sh +547 -0
  29. data/ext/sources/ci/run.sh +336 -0
  30. data/ext/sources/close-issue.yml +28 -0
  31. data/ext/sources/cmake/DefaultTargetOptions.cmake +16 -0
  32. data/ext/sources/cmake/FindFFmpeg.cmake +163 -0
  33. data/ext/sources/cmake/build-info.cmake +60 -0
  34. data/ext/sources/cmake/git-vars.cmake +22 -0
  35. data/ext/sources/cmake/whisper-config.cmake.in +65 -0
  36. data/ext/sources/cmake/whisper.pc.in +10 -0
  37. data/ext/sources/examples/CMakeLists.txt +124 -0
  38. data/ext/sources/examples/addon.node/CMakeLists.txt +31 -0
  39. data/ext/sources/examples/addon.node/__test__/whisper.spec.js +37 -0
  40. data/ext/sources/examples/addon.node/addon.cpp +438 -0
  41. data/ext/sources/examples/addon.node/index.js +54 -0
  42. data/ext/sources/examples/addon.node/package.json +16 -0
  43. data/ext/sources/examples/bench/CMakeLists.txt +8 -0
  44. data/ext/sources/examples/bench/bench.cpp +175 -0
  45. data/ext/sources/examples/bench.wasm/CMakeLists.txt +49 -0
  46. data/ext/sources/examples/bench.wasm/emscripten.cpp +87 -0
  47. data/ext/sources/examples/bench.wasm/index-tmpl.html +284 -0
  48. data/ext/sources/examples/cli/CMakeLists.txt +8 -0
  49. data/ext/sources/examples/cli/cli.cpp +1294 -0
  50. data/ext/sources/examples/coi-serviceworker.js +146 -0
  51. data/ext/sources/examples/command/CMakeLists.txt +10 -0
  52. data/ext/sources/examples/command/command.cpp +776 -0
  53. data/ext/sources/examples/command/commands.txt +9 -0
  54. data/ext/sources/examples/command.wasm/CMakeLists.txt +50 -0
  55. data/ext/sources/examples/command.wasm/emscripten.cpp +327 -0
  56. data/ext/sources/examples/command.wasm/index-tmpl.html +414 -0
  57. data/ext/sources/examples/common-ggml.cpp +238 -0
  58. data/ext/sources/examples/common-ggml.h +18 -0
  59. data/ext/sources/examples/common-sdl.cpp +227 -0
  60. data/ext/sources/examples/common-sdl.h +49 -0
  61. data/ext/sources/examples/common-whisper.cpp +168 -0
  62. data/ext/sources/examples/common-whisper.h +24 -0
  63. data/ext/sources/examples/common.cpp +675 -0
  64. data/ext/sources/examples/common.h +322 -0
  65. data/ext/sources/examples/deprecation-warning/CMakeLists.txt +6 -0
  66. data/ext/sources/examples/deprecation-warning/deprecation-warning.cpp +38 -0
  67. data/ext/sources/examples/ffmpeg-transcode.cpp +368 -0
  68. data/ext/sources/examples/generate-karaoke.sh +57 -0
  69. data/ext/sources/examples/grammar-parser.cpp +423 -0
  70. data/ext/sources/examples/grammar-parser.h +29 -0
  71. data/ext/sources/examples/helpers.js +191 -0
  72. data/ext/sources/examples/json.hpp +24596 -0
  73. data/ext/sources/examples/livestream.sh +112 -0
  74. data/ext/sources/examples/lsp/CMakeLists.txt +9 -0
  75. data/ext/sources/examples/lsp/lsp.cpp +467 -0
  76. data/ext/sources/examples/lsp/whisper.vim +362 -0
  77. data/ext/sources/examples/miniaudio.h +93468 -0
  78. data/ext/sources/examples/python/test_whisper_processor.py +7 -0
  79. data/ext/sources/examples/python/whisper_processor.py +54 -0
  80. data/ext/sources/examples/quantize/CMakeLists.txt +6 -0
  81. data/ext/sources/examples/quantize/quantize.cpp +223 -0
  82. data/ext/sources/examples/server/CMakeLists.txt +12 -0
  83. data/ext/sources/examples/server/bench.js +29 -0
  84. data/ext/sources/examples/server/httplib.h +10497 -0
  85. data/ext/sources/examples/server/server.cpp +1091 -0
  86. data/ext/sources/examples/server.py +115 -0
  87. data/ext/sources/examples/stb_vorbis.c +5584 -0
  88. data/ext/sources/examples/stream/CMakeLists.txt +10 -0
  89. data/ext/sources/examples/stream/stream.cpp +429 -0
  90. data/ext/sources/examples/stream.wasm/CMakeLists.txt +49 -0
  91. data/ext/sources/examples/stream.wasm/emscripten.cpp +216 -0
  92. data/ext/sources/examples/stream.wasm/index-tmpl.html +414 -0
  93. data/ext/sources/examples/sycl/CMakeLists.txt +9 -0
  94. data/ext/sources/examples/sycl/build.sh +22 -0
  95. data/ext/sources/examples/sycl/ls-sycl-device.cpp +11 -0
  96. data/ext/sources/examples/sycl/run-whisper.sh +17 -0
  97. data/ext/sources/examples/talk-llama/CMakeLists.txt +40 -0
  98. data/ext/sources/examples/talk-llama/eleven-labs.py +80 -0
  99. data/ext/sources/examples/talk-llama/llama-adapter.cpp +388 -0
  100. data/ext/sources/examples/talk-llama/llama-adapter.h +76 -0
  101. data/ext/sources/examples/talk-llama/llama-arch.cpp +1746 -0
  102. data/ext/sources/examples/talk-llama/llama-arch.h +437 -0
  103. data/ext/sources/examples/talk-llama/llama-batch.cpp +374 -0
  104. data/ext/sources/examples/talk-llama/llama-batch.h +89 -0
  105. data/ext/sources/examples/talk-llama/llama-chat.cpp +663 -0
  106. data/ext/sources/examples/talk-llama/llama-chat.h +58 -0
  107. data/ext/sources/examples/talk-llama/llama-context.cpp +2676 -0
  108. data/ext/sources/examples/talk-llama/llama-context.h +276 -0
  109. data/ext/sources/examples/talk-llama/llama-cparams.cpp +5 -0
  110. data/ext/sources/examples/talk-llama/llama-cparams.h +41 -0
  111. data/ext/sources/examples/talk-llama/llama-grammar.cpp +1229 -0
  112. data/ext/sources/examples/talk-llama/llama-grammar.h +173 -0
  113. data/ext/sources/examples/talk-llama/llama-graph.cpp +1618 -0
  114. data/ext/sources/examples/talk-llama/llama-graph.h +640 -0
  115. data/ext/sources/examples/talk-llama/llama-hparams.cpp +95 -0
  116. data/ext/sources/examples/talk-llama/llama-hparams.h +190 -0
  117. data/ext/sources/examples/talk-llama/llama-impl.cpp +167 -0
  118. data/ext/sources/examples/talk-llama/llama-impl.h +61 -0
  119. data/ext/sources/examples/talk-llama/llama-io.cpp +15 -0
  120. data/ext/sources/examples/talk-llama/llama-io.h +35 -0
  121. data/ext/sources/examples/talk-llama/llama-kv-cache.cpp +2739 -0
  122. data/ext/sources/examples/talk-llama/llama-kv-cache.h +502 -0
  123. data/ext/sources/examples/talk-llama/llama-kv-cells.h +379 -0
  124. data/ext/sources/examples/talk-llama/llama-memory.cpp +1 -0
  125. data/ext/sources/examples/talk-llama/llama-memory.h +32 -0
  126. data/ext/sources/examples/talk-llama/llama-mmap.cpp +600 -0
  127. data/ext/sources/examples/talk-llama/llama-mmap.h +68 -0
  128. data/ext/sources/examples/talk-llama/llama-model-loader.cpp +1138 -0
  129. data/ext/sources/examples/talk-llama/llama-model-loader.h +169 -0
  130. data/ext/sources/examples/talk-llama/llama-model-saver.cpp +281 -0
  131. data/ext/sources/examples/talk-llama/llama-model-saver.h +37 -0
  132. data/ext/sources/examples/talk-llama/llama-model.cpp +13814 -0
  133. data/ext/sources/examples/talk-llama/llama-model.h +425 -0
  134. data/ext/sources/examples/talk-llama/llama-quant.cpp +966 -0
  135. data/ext/sources/examples/talk-llama/llama-quant.h +1 -0
  136. data/ext/sources/examples/talk-llama/llama-sampling.cpp +2575 -0
  137. data/ext/sources/examples/talk-llama/llama-sampling.h +32 -0
  138. data/ext/sources/examples/talk-llama/llama-vocab.cpp +3340 -0
  139. data/ext/sources/examples/talk-llama/llama-vocab.h +131 -0
  140. data/ext/sources/examples/talk-llama/llama.cpp +354 -0
  141. data/ext/sources/examples/talk-llama/llama.h +1377 -0
  142. data/ext/sources/examples/talk-llama/prompts/talk-alpaca.txt +23 -0
  143. data/ext/sources/examples/talk-llama/speak +40 -0
  144. data/ext/sources/examples/talk-llama/speak.bat +1 -0
  145. data/ext/sources/examples/talk-llama/speak.ps1 +14 -0
  146. data/ext/sources/examples/talk-llama/talk-llama.cpp +808 -0
  147. data/ext/sources/examples/talk-llama/unicode-data.cpp +7034 -0
  148. data/ext/sources/examples/talk-llama/unicode-data.h +20 -0
  149. data/ext/sources/examples/talk-llama/unicode.cpp +849 -0
  150. data/ext/sources/examples/talk-llama/unicode.h +66 -0
  151. data/ext/sources/examples/vad-speech-segments/CMakeLists.txt +8 -0
  152. data/ext/sources/examples/vad-speech-segments/speech.cpp +143 -0
  153. data/ext/sources/examples/wchess/CMakeLists.txt +10 -0
  154. data/ext/sources/examples/wchess/libwchess/CMakeLists.txt +19 -0
  155. data/ext/sources/examples/wchess/libwchess/Chessboard.cpp +803 -0
  156. data/ext/sources/examples/wchess/libwchess/Chessboard.h +33 -0
  157. data/ext/sources/examples/wchess/libwchess/WChess.cpp +193 -0
  158. data/ext/sources/examples/wchess/libwchess/WChess.h +63 -0
  159. data/ext/sources/examples/wchess/libwchess/test-chessboard.cpp +117 -0
  160. data/ext/sources/examples/wchess/wchess.cmd/CMakeLists.txt +8 -0
  161. data/ext/sources/examples/wchess/wchess.cmd/wchess.cmd.cpp +249 -0
  162. data/ext/sources/examples/whisper.wasm/CMakeLists.txt +50 -0
  163. data/ext/sources/examples/whisper.wasm/emscripten.cpp +118 -0
  164. data/ext/sources/examples/whisper.wasm/index-tmpl.html +658 -0
  165. data/ext/sources/ggml/CMakeLists.txt +390 -0
  166. data/ext/sources/ggml/cmake/BuildTypes.cmake +54 -0
  167. data/ext/sources/ggml/cmake/GitVars.cmake +22 -0
  168. data/ext/sources/ggml/cmake/common.cmake +26 -0
  169. data/ext/sources/ggml/cmake/ggml-config.cmake.in +152 -0
  170. data/ext/{ggml → sources/ggml}/include/ggml-alloc.h +1 -1
  171. data/ext/{ggml → sources/ggml}/include/ggml-backend.h +9 -7
  172. data/ext/{ggml → sources/ggml}/include/ggml-cpp.h +2 -1
  173. data/ext/{ggml → sources/ggml}/include/ggml-cpu.h +9 -1
  174. data/ext/{ggml → sources/ggml}/include/ggml-metal.h +1 -1
  175. data/ext/{ggml → sources/ggml}/include/ggml-opt.h +49 -28
  176. data/ext/{ggml → sources/ggml}/include/ggml-rpc.h +6 -1
  177. data/ext/{ggml → sources/ggml}/include/ggml-vulkan.h +0 -2
  178. data/ext/{ggml → sources/ggml}/include/ggml.h +182 -265
  179. data/ext/sources/ggml/include/gguf.h +202 -0
  180. data/ext/sources/ggml/src/CMakeLists.txt +346 -0
  181. data/ext/{ggml → sources/ggml}/src/ggml-alloc.c +34 -29
  182. data/ext/sources/ggml/src/ggml-amx/CMakeLists.txt +107 -0
  183. data/ext/{ggml → sources/ggml}/src/ggml-backend-impl.h +1 -2
  184. data/ext/{ggml → sources/ggml}/src/ggml-backend-reg.cpp +87 -53
  185. data/ext/{ggml → sources/ggml}/src/ggml-backend.cpp +26 -14
  186. data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +87 -0
  187. data/ext/sources/ggml/src/ggml-cann/CMakeLists.txt +74 -0
  188. data/ext/sources/ggml/src/ggml-cann/Doxyfile +2579 -0
  189. data/ext/{ggml → sources/ggml}/src/ggml-cann/acl_tensor.cpp +10 -4
  190. data/ext/{ggml → sources/ggml}/src/ggml-cann/acl_tensor.h +5 -5
  191. data/ext/{ggml → sources/ggml}/src/ggml-cann/aclnn_ops.cpp +1272 -1506
  192. data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +1125 -0
  193. data/ext/{ggml → sources/ggml}/src/ggml-cann/common.h +135 -1
  194. data/ext/{ggml → sources/ggml}/src/ggml-cann/ggml-cann.cpp +564 -146
  195. data/ext/sources/ggml/src/ggml-cann/kernels/CMakeLists.txt +30 -0
  196. data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/dup.cpp +3 -5
  197. data/ext/{ggml → sources/ggml}/src/ggml-common.h +12 -8
  198. data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +504 -0
  199. data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/amx.cpp +2 -1
  200. data/ext/sources/ggml/src/ggml-cpu/binary-ops.cpp +158 -0
  201. data/ext/sources/ggml/src/ggml-cpu/binary-ops.h +16 -0
  202. data/ext/sources/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +100 -0
  203. data/ext/sources/ggml/src/ggml-cpu/common.h +72 -0
  204. data/ext/{ggml → sources/ggml}/src/ggml-cpu/cpu-feats-x86.cpp +5 -1
  205. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +6431 -0
  206. data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-impl.h +163 -41
  207. data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-quants.c +4029 -1117
  208. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +3510 -0
  209. data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu.cpp +67 -18
  210. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +337 -0
  211. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.h +95 -0
  212. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +482 -0
  213. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.h +17 -0
  214. data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +3544 -0
  215. data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.h +14 -0
  216. data/ext/sources/ggml/src/ggml-cpu/ops.cpp +8903 -0
  217. data/ext/sources/ggml/src/ggml-cpu/ops.h +110 -0
  218. data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +892 -0
  219. data/ext/sources/ggml/src/ggml-cpu/unary-ops.cpp +186 -0
  220. data/ext/sources/ggml/src/ggml-cpu/unary-ops.h +28 -0
  221. data/ext/sources/ggml/src/ggml-cpu/vec.cpp +252 -0
  222. data/ext/sources/ggml/src/ggml-cpu/vec.h +818 -0
  223. data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +184 -0
  224. data/ext/sources/ggml/src/ggml-cuda/acc.cu +61 -0
  225. data/ext/sources/ggml/src/ggml-cuda/acc.cuh +5 -0
  226. data/ext/sources/ggml/src/ggml-cuda/arange.cu +34 -0
  227. data/ext/sources/ggml/src/ggml-cuda/arange.cuh +5 -0
  228. data/ext/sources/ggml/src/ggml-cuda/argmax.cu +91 -0
  229. data/ext/sources/ggml/src/ggml-cuda/argmax.cuh +3 -0
  230. data/ext/sources/ggml/src/ggml-cuda/argsort.cu +104 -0
  231. data/ext/sources/ggml/src/ggml-cuda/argsort.cuh +3 -0
  232. data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +363 -0
  233. data/ext/sources/ggml/src/ggml-cuda/binbcast.cuh +9 -0
  234. data/ext/sources/ggml/src/ggml-cuda/clamp.cu +45 -0
  235. data/ext/sources/ggml/src/ggml-cuda/clamp.cuh +5 -0
  236. data/ext/sources/ggml/src/ggml-cuda/common.cuh +828 -0
  237. data/ext/sources/ggml/src/ggml-cuda/concat.cu +221 -0
  238. data/ext/sources/ggml/src/ggml-cuda/concat.cuh +5 -0
  239. data/ext/sources/ggml/src/ggml-cuda/conv-transpose-1d.cu +89 -0
  240. data/ext/sources/ggml/src/ggml-cuda/conv-transpose-1d.cuh +5 -0
  241. data/ext/sources/ggml/src/ggml-cuda/convert.cu +730 -0
  242. data/ext/sources/ggml/src/ggml-cuda/convert.cuh +26 -0
  243. data/ext/sources/ggml/src/ggml-cuda/count-equal.cu +64 -0
  244. data/ext/sources/ggml/src/ggml-cuda/count-equal.cuh +5 -0
  245. data/ext/sources/ggml/src/ggml-cuda/cp-async.cuh +57 -0
  246. data/ext/sources/ggml/src/ggml-cuda/cpy.cu +705 -0
  247. data/ext/sources/ggml/src/ggml-cuda/cpy.cuh +11 -0
  248. data/ext/sources/ggml/src/ggml-cuda/cross-entropy-loss.cu +189 -0
  249. data/ext/sources/ggml/src/ggml-cuda/cross-entropy-loss.cuh +7 -0
  250. data/ext/sources/ggml/src/ggml-cuda/dequantize.cuh +103 -0
  251. data/ext/sources/ggml/src/ggml-cuda/diagmask.cu +40 -0
  252. data/ext/sources/ggml/src/ggml-cuda/diagmask.cuh +5 -0
  253. data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +881 -0
  254. data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +1471 -0
  255. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cu +357 -0
  256. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cuh +3 -0
  257. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cu +365 -0
  258. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cuh +3 -0
  259. data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f16.cuh +482 -0
  260. data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f32.cuh +472 -0
  261. data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +634 -0
  262. data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +3 -0
  263. data/ext/sources/ggml/src/ggml-cuda/fattn.cu +346 -0
  264. data/ext/sources/ggml/src/ggml-cuda/fattn.cuh +3 -0
  265. data/ext/sources/ggml/src/ggml-cuda/getrows.cu +275 -0
  266. data/ext/sources/ggml/src/ggml-cuda/getrows.cuh +15 -0
  267. data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +3505 -0
  268. data/ext/sources/ggml/src/ggml-cuda/gla.cu +93 -0
  269. data/ext/sources/ggml/src/ggml-cuda/gla.cuh +3 -0
  270. data/ext/sources/ggml/src/ggml-cuda/im2col.cu +103 -0
  271. data/ext/sources/ggml/src/ggml-cuda/im2col.cuh +5 -0
  272. data/ext/sources/ggml/src/ggml-cuda/mma.cuh +396 -0
  273. data/ext/sources/ggml/src/ggml-cuda/mmq.cu +324 -0
  274. data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +3217 -0
  275. data/ext/sources/ggml/src/ggml-cuda/mmv.cu +336 -0
  276. data/ext/sources/ggml/src/ggml-cuda/mmv.cuh +12 -0
  277. data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +595 -0
  278. data/ext/sources/ggml/src/ggml-cuda/mmvq.cuh +12 -0
  279. data/ext/sources/ggml/src/ggml-cuda/norm.cu +458 -0
  280. data/ext/sources/ggml/src/ggml-cuda/norm.cuh +11 -0
  281. data/ext/sources/ggml/src/ggml-cuda/opt-step-adamw.cu +78 -0
  282. data/ext/sources/ggml/src/ggml-cuda/opt-step-adamw.cuh +5 -0
  283. data/ext/sources/ggml/src/ggml-cuda/out-prod.cu +68 -0
  284. data/ext/sources/ggml/src/ggml-cuda/out-prod.cuh +3 -0
  285. data/ext/sources/ggml/src/ggml-cuda/pad.cu +49 -0
  286. data/ext/sources/ggml/src/ggml-cuda/pad.cuh +5 -0
  287. data/ext/sources/ggml/src/ggml-cuda/pool2d.cu +94 -0
  288. data/ext/sources/ggml/src/ggml-cuda/pool2d.cuh +5 -0
  289. data/ext/sources/ggml/src/ggml-cuda/quantize.cu +190 -0
  290. data/ext/sources/ggml/src/ggml-cuda/quantize.cuh +27 -0
  291. data/ext/sources/ggml/src/ggml-cuda/rope.cu +456 -0
  292. data/ext/sources/ggml/src/ggml-cuda/rope.cuh +7 -0
  293. data/ext/sources/ggml/src/ggml-cuda/scale.cu +31 -0
  294. data/ext/sources/ggml/src/ggml-cuda/scale.cuh +5 -0
  295. data/ext/sources/ggml/src/ggml-cuda/softmax.cu +283 -0
  296. data/ext/sources/ggml/src/ggml-cuda/softmax.cuh +7 -0
  297. data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +148 -0
  298. data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cuh +3 -0
  299. data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cu +153 -0
  300. data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cuh +3 -0
  301. data/ext/sources/ggml/src/ggml-cuda/sum.cu +45 -0
  302. data/ext/sources/ggml/src/ggml-cuda/sum.cuh +5 -0
  303. data/ext/sources/ggml/src/ggml-cuda/sumrows.cu +39 -0
  304. data/ext/sources/ggml/src/ggml-cuda/sumrows.cuh +5 -0
  305. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_16.cu +5 -0
  306. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_8.cu +10 -0
  307. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_1.cu +10 -0
  308. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_2.cu +10 -0
  309. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_4.cu +10 -0
  310. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_16.cu +5 -0
  311. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_4.cu +10 -0
  312. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_8.cu +10 -0
  313. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_1.cu +10 -0
  314. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_2.cu +10 -0
  315. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_16.cu +5 -0
  316. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_2.cu +10 -0
  317. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_4.cu +10 -0
  318. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_8.cu +10 -0
  319. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_64-ncols2_1.cu +10 -0
  320. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_1.cu +10 -0
  321. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_2.cu +10 -0
  322. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_4.cu +10 -0
  323. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_8.cu +10 -0
  324. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu +5 -0
  325. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu +5 -0
  326. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu +5 -0
  327. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu +5 -0
  328. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu +5 -0
  329. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu +5 -0
  330. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu +5 -0
  331. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu +5 -0
  332. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu +5 -0
  333. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu +5 -0
  334. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu +5 -0
  335. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu +5 -0
  336. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu +5 -0
  337. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu +5 -0
  338. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu +5 -0
  339. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu +5 -0
  340. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu +5 -0
  341. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu +5 -0
  342. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu +5 -0
  343. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu +5 -0
  344. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu +5 -0
  345. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu +5 -0
  346. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu +5 -0
  347. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu +5 -0
  348. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu +5 -0
  349. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu +5 -0
  350. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu +5 -0
  351. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu +5 -0
  352. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu +5 -0
  353. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu +5 -0
  354. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu +5 -0
  355. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu +5 -0
  356. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu +5 -0
  357. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu +5 -0
  358. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu +5 -0
  359. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu +5 -0
  360. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu +5 -0
  361. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu +5 -0
  362. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu +5 -0
  363. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu +5 -0
  364. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu +5 -0
  365. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu +5 -0
  366. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu +5 -0
  367. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu +5 -0
  368. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu +5 -0
  369. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu +5 -0
  370. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu +5 -0
  371. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu +5 -0
  372. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu +5 -0
  373. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu +5 -0
  374. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu +5 -0
  375. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu +5 -0
  376. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu +5 -0
  377. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu +5 -0
  378. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu +5 -0
  379. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu +5 -0
  380. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu +5 -0
  381. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu +5 -0
  382. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu +5 -0
  383. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu +5 -0
  384. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu +5 -0
  385. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu +5 -0
  386. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu +5 -0
  387. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu +5 -0
  388. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu +5 -0
  389. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu +5 -0
  390. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu +5 -0
  391. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu +5 -0
  392. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu +5 -0
  393. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu +5 -0
  394. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu +5 -0
  395. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu +5 -0
  396. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu +5 -0
  397. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu +5 -0
  398. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu +5 -0
  399. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu +5 -0
  400. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu +5 -0
  401. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu +5 -0
  402. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu +5 -0
  403. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu +5 -0
  404. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu +5 -0
  405. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu +5 -0
  406. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu +5 -0
  407. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu +5 -0
  408. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu +5 -0
  409. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu +5 -0
  410. data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +78 -0
  411. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_s.cu +5 -0
  412. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_s.cu +5 -0
  413. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu +5 -0
  414. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu +5 -0
  415. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_s.cu +5 -0
  416. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu +5 -0
  417. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu +5 -0
  418. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu +5 -0
  419. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q2_k.cu +5 -0
  420. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q3_k.cu +5 -0
  421. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_0.cu +5 -0
  422. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_1.cu +5 -0
  423. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_k.cu +5 -0
  424. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_0.cu +5 -0
  425. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_1.cu +5 -0
  426. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_k.cu +5 -0
  427. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q6_k.cu +5 -0
  428. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q8_0.cu +5 -0
  429. data/ext/sources/ggml/src/ggml-cuda/tsembd.cu +47 -0
  430. data/ext/sources/ggml/src/ggml-cuda/tsembd.cuh +5 -0
  431. data/ext/sources/ggml/src/ggml-cuda/unary.cu +289 -0
  432. data/ext/sources/ggml/src/ggml-cuda/unary.cuh +59 -0
  433. data/ext/sources/ggml/src/ggml-cuda/upscale.cu +51 -0
  434. data/ext/sources/ggml/src/ggml-cuda/upscale.cuh +5 -0
  435. data/ext/sources/ggml/src/ggml-cuda/vecdotq.cuh +1135 -0
  436. data/ext/{ggml → sources/ggml}/src/ggml-cuda/vendors/cuda.h +1 -0
  437. data/ext/{ggml → sources/ggml}/src/ggml-cuda/vendors/hip.h +57 -0
  438. data/ext/{ggml → sources/ggml}/src/ggml-cuda/vendors/musa.h +7 -1
  439. data/ext/sources/ggml/src/ggml-cuda/wkv.cu +199 -0
  440. data/ext/sources/ggml/src/ggml-cuda/wkv.cuh +7 -0
  441. data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +131 -0
  442. data/ext/{ggml → sources/ggml}/src/ggml-impl.h +64 -19
  443. data/ext/sources/ggml/src/ggml-kompute/CMakeLists.txt +166 -0
  444. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/common.comp +112 -0
  445. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_add.comp +58 -0
  446. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_addrow.comp +25 -0
  447. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f16.comp +52 -0
  448. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f32.comp +52 -0
  449. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f16.comp +52 -0
  450. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f32.comp +52 -0
  451. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_diagmask.comp +30 -0
  452. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_gelu.comp +22 -0
  453. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows.comp +17 -0
  454. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f16.comp +31 -0
  455. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f32.comp +31 -0
  456. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_0.comp +38 -0
  457. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_1.comp +39 -0
  458. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q6_k.comp +44 -0
  459. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul.comp +52 -0
  460. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp +69 -0
  461. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_mat_f32.comp +51 -0
  462. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_0.comp +33 -0
  463. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_1.comp +35 -0
  464. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp +140 -0
  465. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp +106 -0
  466. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q8_0.comp +73 -0
  467. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp +52 -0
  468. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp +28 -0
  469. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_norm.comp +84 -0
  470. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_relu.comp +21 -0
  471. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rmsnorm.comp +53 -0
  472. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f16.comp +52 -0
  473. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f32.comp +52 -0
  474. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f16.comp +52 -0
  475. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f32.comp +52 -0
  476. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale.comp +19 -0
  477. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale_8.comp +23 -0
  478. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_silu.comp +22 -0
  479. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp +72 -0
  480. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp +71 -0
  481. data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +120 -0
  482. data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +622 -0
  483. data/ext/{ggml → sources/ggml}/src/ggml-metal/ggml-metal.m +2178 -1064
  484. data/ext/{ggml → sources/ggml}/src/ggml-metal/ggml-metal.metal +1575 -1218
  485. data/ext/sources/ggml/src/ggml-musa/CMakeLists.txt +113 -0
  486. data/ext/sources/ggml/src/ggml-musa/mudnn.cu +112 -0
  487. data/ext/sources/ggml/src/ggml-musa/mudnn.cuh +12 -0
  488. data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +96 -0
  489. data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +5124 -0
  490. data/ext/sources/ggml/src/ggml-opencl/kernels/add.cl +83 -0
  491. data/ext/sources/ggml/src/ggml-opencl/kernels/clamp.cl +20 -0
  492. data/ext/sources/ggml/src/ggml-opencl/kernels/cpy.cl +184 -0
  493. data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +118 -0
  494. data/ext/sources/ggml/src/ggml-opencl/kernels/diag_mask_inf.cl +58 -0
  495. data/ext/sources/ggml/src/ggml-opencl/kernels/embed_kernel.py +26 -0
  496. data/ext/sources/ggml/src/ggml-opencl/kernels/gelu.cl +62 -0
  497. data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle.cl +268 -0
  498. data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_general.cl +274 -0
  499. data/ext/sources/ggml/src/ggml-opencl/kernels/get_rows.cl +163 -0
  500. data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f16.cl +57 -0
  501. data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f32.cl +57 -0
  502. data/ext/sources/ggml/src/ggml-opencl/kernels/mul.cl +79 -0
  503. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mat_Ab_Bi_8x4.cl +139 -0
  504. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f16.cl +118 -0
  505. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32.cl +118 -0
  506. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_1row.cl +94 -0
  507. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_l4.cl +84 -0
  508. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f32_f32.cl +118 -0
  509. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32.cl +192 -0
  510. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_16x_flat.cl +307 -0
  511. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_8x_flat.cl +265 -0
  512. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_8x_flat.cl +272 -0
  513. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_v.cl +254 -0
  514. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q6_k.cl +190 -0
  515. data/ext/sources/ggml/src/ggml-opencl/kernels/norm.cl +81 -0
  516. data/ext/sources/ggml/src/ggml-opencl/kernels/relu.cl +16 -0
  517. data/ext/sources/ggml/src/ggml-opencl/kernels/rms_norm.cl +96 -0
  518. data/ext/sources/ggml/src/ggml-opencl/kernels/rope.cl +721 -0
  519. data/ext/sources/ggml/src/ggml-opencl/kernels/scale.cl +16 -0
  520. data/ext/sources/ggml/src/ggml-opencl/kernels/silu.cl +30 -0
  521. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f16.cl +87 -0
  522. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f32.cl +87 -0
  523. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f16.cl +86 -0
  524. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f32.cl +86 -0
  525. data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +84 -0
  526. data/ext/{ggml → sources/ggml}/src/ggml-opt.cpp +373 -190
  527. data/ext/{ggml → sources/ggml}/src/ggml-quants.c +114 -120
  528. data/ext/sources/ggml/src/ggml-rpc/CMakeLists.txt +9 -0
  529. data/ext/{ggml → sources/ggml}/src/ggml-rpc/ggml-rpc.cpp +480 -73
  530. data/ext/sources/ggml/src/ggml-sycl/CMakeLists.txt +189 -0
  531. data/ext/sources/ggml/src/ggml-sycl/backend.hpp +37 -0
  532. data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +345 -0
  533. data/ext/sources/ggml/src/ggml-sycl/binbcast.hpp +39 -0
  534. data/ext/{ggml → sources/ggml}/src/ggml-sycl/common.cpp +20 -32
  535. data/ext/sources/ggml/src/ggml-sycl/common.hpp +589 -0
  536. data/ext/{ggml → sources/ggml}/src/ggml-sycl/concat.cpp +32 -33
  537. data/ext/sources/ggml/src/ggml-sycl/concat.hpp +20 -0
  538. data/ext/{ggml → sources/ggml}/src/ggml-sycl/conv.cpp +4 -2
  539. data/ext/sources/ggml/src/ggml-sycl/conv.hpp +20 -0
  540. data/ext/{ggml → sources/ggml}/src/ggml-sycl/convert.cpp +104 -28
  541. data/ext/sources/ggml/src/ggml-sycl/convert.hpp +34 -0
  542. data/ext/sources/ggml/src/ggml-sycl/cpy.cpp +700 -0
  543. data/ext/sources/ggml/src/ggml-sycl/cpy.hpp +11 -0
  544. data/ext/sources/ggml/src/ggml-sycl/dequantize.hpp +791 -0
  545. data/ext/{ggml → sources/ggml}/src/ggml-sycl/dmmv.cpp +156 -17
  546. data/ext/sources/ggml/src/ggml-sycl/dmmv.hpp +27 -0
  547. data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +2957 -0
  548. data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +1511 -0
  549. data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +75 -0
  550. data/ext/sources/ggml/src/ggml-sycl/gemm.hpp +99 -0
  551. data/ext/sources/ggml/src/ggml-sycl/getrows.cpp +309 -0
  552. data/ext/sources/ggml/src/ggml-sycl/getrows.hpp +20 -0
  553. data/ext/{ggml → sources/ggml}/src/ggml-sycl/ggml-sycl.cpp +1004 -1240
  554. data/ext/sources/ggml/src/ggml-sycl/gla.cpp +106 -0
  555. data/ext/sources/ggml/src/ggml-sycl/gla.hpp +8 -0
  556. data/ext/sources/ggml/src/ggml-sycl/im2col.cpp +136 -0
  557. data/ext/sources/ggml/src/ggml-sycl/im2col.hpp +21 -0
  558. data/ext/{ggml → sources/ggml}/src/ggml-sycl/mmq.cpp +0 -1
  559. data/ext/sources/ggml/src/ggml-sycl/mmq.hpp +33 -0
  560. data/ext/{ggml → sources/ggml}/src/ggml-sycl/mmvq.cpp +261 -166
  561. data/ext/sources/ggml/src/ggml-sycl/mmvq.hpp +27 -0
  562. data/ext/{ggml → sources/ggml}/src/ggml-sycl/norm.cpp +204 -81
  563. data/ext/sources/ggml/src/ggml-sycl/norm.hpp +26 -0
  564. data/ext/{ggml → sources/ggml}/src/ggml-sycl/outprod.cpp +8 -17
  565. data/ext/sources/ggml/src/ggml-sycl/outprod.hpp +10 -0
  566. data/ext/sources/ggml/src/ggml-sycl/presets.hpp +74 -0
  567. data/ext/sources/ggml/src/ggml-sycl/quants.hpp +83 -0
  568. data/ext/sources/ggml/src/ggml-sycl/rope.cpp +361 -0
  569. data/ext/sources/ggml/src/ggml-sycl/rope.hpp +20 -0
  570. data/ext/{ggml → sources/ggml}/src/ggml-sycl/softmax.cpp +35 -25
  571. data/ext/sources/ggml/src/ggml-sycl/softmax.hpp +20 -0
  572. data/ext/sources/ggml/src/ggml-sycl/sycl_hw.cpp +13 -0
  573. data/ext/sources/ggml/src/ggml-sycl/sycl_hw.hpp +23 -0
  574. data/ext/{ggml → sources/ggml}/src/ggml-sycl/tsembd.cpp +3 -3
  575. data/ext/sources/ggml/src/ggml-sycl/tsembd.hpp +20 -0
  576. data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +1215 -0
  577. data/ext/sources/ggml/src/ggml-sycl/wkv.cpp +293 -0
  578. data/ext/sources/ggml/src/ggml-sycl/wkv.hpp +10 -0
  579. data/ext/sources/ggml/src/ggml-vulkan/CMakeLists.txt +196 -0
  580. data/ext/sources/ggml/src/ggml-vulkan/cmake/host-toolchain.cmake.in +15 -0
  581. data/ext/{ggml → sources/ggml}/src/ggml-vulkan/ggml-vulkan.cpp +3130 -1087
  582. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +39 -0
  583. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +29 -0
  584. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add.comp +29 -0
  585. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argmax.comp +51 -0
  586. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp +69 -0
  587. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp +17 -0
  588. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp +41 -0
  589. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp +49 -0
  590. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp +105 -0
  591. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp +23 -0
  592. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +51 -0
  593. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +242 -0
  594. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp +17 -0
  595. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/count_equal.comp +31 -0
  596. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_f32.comp +20 -0
  597. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp +462 -0
  598. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +699 -0
  599. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_head.comp +13 -0
  600. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_m.comp +42 -0
  601. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_s.comp +35 -0
  602. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp +44 -0
  603. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xs.comp +43 -0
  604. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp +48 -0
  605. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp +39 -0
  606. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp +49 -0
  607. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp +32 -0
  608. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_xs.comp +34 -0
  609. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp +34 -0
  610. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp +42 -0
  611. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_0.comp +30 -0
  612. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_1.comp +32 -0
  613. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +68 -0
  614. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_0.comp +34 -0
  615. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_1.comp +35 -0
  616. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +70 -0
  617. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp +33 -0
  618. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q8_0.comp +31 -0
  619. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/diag_mask_inf.comp +34 -0
  620. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/div.comp +27 -0
  621. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +337 -0
  622. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.comp +162 -0
  623. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +360 -0
  624. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +267 -0
  625. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +59 -0
  626. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu.comp +25 -0
  627. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu_quick.comp +23 -0
  628. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp +64 -0
  629. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_head.comp +9 -0
  630. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp +76 -0
  631. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp +33 -0
  632. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +41 -0
  633. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp +66 -0
  634. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +100 -0
  635. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +41 -0
  636. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/leaky_relu.comp +22 -0
  637. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp +27 -0
  638. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp +48 -0
  639. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +169 -0
  640. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp +118 -0
  641. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_m.comp +82 -0
  642. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp +79 -0
  643. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_s.comp +90 -0
  644. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xs.comp +87 -0
  645. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xxs.comp +87 -0
  646. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_s.comp +90 -0
  647. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_xxs.comp +88 -0
  648. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +118 -0
  649. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp +154 -0
  650. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +130 -0
  651. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +132 -0
  652. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +136 -0
  653. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +167 -0
  654. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +130 -0
  655. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +868 -0
  656. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +441 -0
  657. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +442 -0
  658. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.comp +99 -0
  659. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/norm.comp +44 -0
  660. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_adamw.comp +42 -0
  661. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +28 -0
  662. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pool2d.comp +74 -0
  663. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/quantize_q8_1.comp +77 -0
  664. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/relu.comp +21 -0
  665. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp +26 -0
  666. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat_back.comp +37 -0
  667. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +52 -0
  668. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_back.comp +55 -0
  669. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp +58 -0
  670. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +60 -0
  671. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +43 -0
  672. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +43 -0
  673. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_vision.comp +47 -0
  674. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +24 -0
  675. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sigmoid.comp +20 -0
  676. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu.comp +22 -0
  677. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu_back.comp +26 -0
  678. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp +17 -0
  679. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +173 -0
  680. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_back.comp +50 -0
  681. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/square.comp +17 -0
  682. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sub.comp +29 -0
  683. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp +37 -0
  684. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp +20 -0
  685. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_bfloat16_support.comp +7 -0
  686. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat2_support.comp +7 -0
  687. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp +7 -0
  688. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_integer_dot_support.comp +7 -0
  689. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp +41 -0
  690. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/types.comp +1373 -0
  691. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +36 -0
  692. data/ext/{ggml → sources/ggml}/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +193 -35
  693. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/wkv6.comp +87 -0
  694. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/wkv7.comp +91 -0
  695. data/ext/{ggml → sources/ggml}/src/ggml.c +676 -1820
  696. data/ext/sources/ggml/src/gguf.cpp +1330 -0
  697. data/ext/{include → sources/include}/whisper.h +68 -2
  698. data/ext/sources/src/CMakeLists.txt +143 -0
  699. data/ext/{src → sources/src}/coreml/whisper-decoder-impl.h +27 -15
  700. data/ext/{src → sources/src}/coreml/whisper-decoder-impl.m +35 -10
  701. data/ext/{src → sources/src}/coreml/whisper-encoder-impl.h +21 -9
  702. data/ext/{src → sources/src}/coreml/whisper-encoder-impl.m +28 -3
  703. data/ext/sources/src/coreml/whisper-encoder.mm +73 -0
  704. data/ext/sources/src/whisper-arch.h +197 -0
  705. data/ext/{src → sources/src}/whisper.cpp +1905 -374
  706. data/ext/sources/tests/CMakeLists.txt +105 -0
  707. data/ext/sources/tests/earnings21/eval.mk +58 -0
  708. data/ext/sources/tests/earnings21/eval.py +68 -0
  709. data/ext/sources/tests/earnings21/normalizers/__init__.py +2 -0
  710. data/ext/sources/tests/earnings21/normalizers/basic.py +80 -0
  711. data/ext/sources/tests/earnings21/normalizers/english.json +1741 -0
  712. data/ext/sources/tests/earnings21/normalizers/english.py +550 -0
  713. data/ext/sources/tests/earnings21/requirements.txt +6 -0
  714. data/ext/sources/tests/en-0-ref.txt +1 -0
  715. data/ext/sources/tests/en-1-ref.txt +1 -0
  716. data/ext/sources/tests/en-2-ref.txt +1 -0
  717. data/ext/sources/tests/es-0-ref.txt +1 -0
  718. data/ext/sources/tests/librispeech/eval.mk +39 -0
  719. data/ext/sources/tests/librispeech/eval.py +47 -0
  720. data/ext/sources/tests/librispeech/normalizers/__init__.py +2 -0
  721. data/ext/sources/tests/librispeech/normalizers/basic.py +80 -0
  722. data/ext/sources/tests/librispeech/normalizers/english.json +1741 -0
  723. data/ext/sources/tests/librispeech/normalizers/english.py +550 -0
  724. data/ext/sources/tests/librispeech/requirements.txt +6 -0
  725. data/ext/sources/tests/run-tests.sh +130 -0
  726. data/ext/sources/tests/test-c.c +3 -0
  727. data/ext/sources/tests/test-vad-full.cpp +54 -0
  728. data/ext/sources/tests/test-vad.cpp +83 -0
  729. data/ext/sources/tests/test-whisper.js +58 -0
  730. data/extsources.rb +33 -5
  731. data/lib/whisper/model/uri.rb +149 -128
  732. data/sig/whisper.rbs +480 -0
  733. data/tests/helper.rb +28 -0
  734. data/tests/test_callback.rb +45 -3
  735. data/tests/test_error.rb +2 -2
  736. data/tests/test_model.rb +38 -0
  737. data/tests/test_package.rb +18 -3
  738. data/tests/test_params.rb +145 -8
  739. data/tests/test_segment.rb +10 -19
  740. data/tests/test_vad.rb +19 -0
  741. data/tests/test_vad_params.rb +103 -0
  742. data/tests/test_whisper.rb +37 -37
  743. data/whispercpp.gemspec +5 -4
  744. metadata +766 -111
  745. data/ext/cpu.mk +0 -9
  746. data/ext/examples/dr_wav.h +0 -8815
  747. data/ext/ggml/src/ggml-cann/aclnn_ops.h +0 -592
  748. data/ext/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +0 -4262
  749. data/ext/ggml/src/ggml-cpu/ggml-cpu.c +0 -14123
  750. data/ext/ggml/src/ggml-cpu/llamafile/sgemm.cpp +0 -1884
  751. data/ext/ggml/src/ggml-cpu/llamafile/sgemm.h +0 -14
  752. data/ext/ggml/src/ggml-metal/ggml-metal-impl.h +0 -288
  753. data/ext/ggml/src/ggml-sycl/element_wise.cpp +0 -1030
  754. data/ext/ggml/src/ggml-sycl/im2col.cpp +0 -126
  755. data/ext/ggml/src/ggml-sycl/rope.cpp +0 -276
  756. data/ext/ggml/src/ggml-sycl/wkv6.cpp +0 -141
  757. data/ext/metal-embed.mk +0 -17
  758. data/ext/metal.mk +0 -6
  759. data/ext/ruby_whisper.cpp +0 -1909
  760. data/ext/scripts/get-flags.mk +0 -38
  761. data/lib/whisper.rb +0 -2
  762. /data/ext/{ggml → sources/ggml}/include/ggml-blas.h +0 -0
  763. /data/ext/{ggml → sources/ggml}/include/ggml-cann.h +0 -0
  764. /data/ext/{ggml → sources/ggml}/include/ggml-cuda.h +0 -0
  765. /data/ext/{ggml → sources/ggml}/include/ggml-kompute.h +0 -0
  766. /data/ext/{ggml → sources/ggml}/include/ggml-opencl.h +0 -0
  767. /data/ext/{ggml → sources/ggml}/include/ggml-sycl.h +0 -0
  768. /data/ext/{ggml → sources/ggml}/src/ggml-amx/common.h +0 -0
  769. /data/ext/{ggml → sources/ggml}/src/ggml-amx/ggml-amx.cpp +0 -0
  770. /data/ext/{ggml → sources/ggml}/src/ggml-amx/mmq.cpp +0 -0
  771. /data/ext/{ggml → sources/ggml}/src/ggml-amx/mmq.h +0 -0
  772. /data/ext/{ggml → sources/ggml}/src/ggml-blas/ggml-blas.cpp +0 -0
  773. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/ascendc_kernels.h +0 -0
  774. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_f16.cpp +0 -0
  775. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_f32.cpp +0 -0
  776. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_q4_0.cpp +0 -0
  777. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_q8_0.cpp +0 -0
  778. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +0 -0
  779. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +0 -0
  780. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +0 -0
  781. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/amx.h +0 -0
  782. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/common.h +0 -0
  783. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/mmq.cpp +0 -0
  784. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/mmq.h +0 -0
  785. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-aarch64.h +0 -0
  786. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-hbm.cpp +0 -0
  787. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-hbm.h +0 -0
  788. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-quants.h +0 -0
  789. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-traits.cpp +0 -0
  790. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-traits.h +0 -0
  791. /data/ext/{ggml → sources/ggml}/src/ggml-kompute/ggml-kompute.cpp +0 -0
  792. /data/ext/{ggml → sources/ggml}/src/ggml-quants.h +0 -0
  793. /data/ext/{ggml → sources/ggml}/src/ggml-threading.cpp +0 -0
  794. /data/ext/{ggml → sources/ggml}/src/ggml-threading.h +0 -0
  795. /data/ext/{src → sources/src}/coreml/whisper-encoder.h +0 -0
  796. /data/ext/{src → sources/src}/openvino/whisper-openvino-encoder.cpp +0 -0
  797. /data/ext/{src → sources/src}/openvino/whisper-openvino-encoder.h +0 -0
@@ -0,0 +1,1377 @@
+ #ifndef LLAMA_H
+ #define LLAMA_H
+
+ #include "ggml.h"
+ #include "ggml-cpu.h"
+ #include "ggml-backend.h"
+ #include "ggml-opt.h"
+
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <stdio.h>
+ #include <stdbool.h>
+
+ #ifdef LLAMA_SHARED
+ #    if defined(_WIN32) && !defined(__MINGW32__)
+ #        ifdef LLAMA_BUILD
+ #            define LLAMA_API __declspec(dllexport)
+ #        else
+ #            define LLAMA_API __declspec(dllimport)
+ #        endif
+ #    else
+ #        define LLAMA_API __attribute__ ((visibility ("default")))
+ #    endif
+ #else
+ #    define LLAMA_API
+ #endif
+
+ #ifdef __GNUC__
+ #    define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
+ #elif defined(_MSC_VER)
+ #    define DEPRECATED(func, hint) __declspec(deprecated(hint)) func
+ #else
+ #    define DEPRECATED(func, hint) func
+ #endif
+
+ #define LLAMA_DEFAULT_SEED 0xFFFFFFFF
+
+ #define LLAMA_TOKEN_NULL -1
+
+ #define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
+ #define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
+ #define LLAMA_FILE_MAGIC_GGSQ 0x67677371u // 'ggsq'
+
+ #define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
+ #define LLAMA_SESSION_VERSION 9
+
+ #define LLAMA_STATE_SEQ_MAGIC LLAMA_FILE_MAGIC_GGSQ
+ #define LLAMA_STATE_SEQ_VERSION 2
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ //
+ // C interface
+ //
+ // TODO: show sample usage
+ //
+
+ struct llama_vocab;
+ struct llama_model;
+ struct llama_context;
+ struct llama_sampler;
+ struct llama_kv_cache;
+
+ typedef int32_t llama_pos;
+ typedef int32_t llama_token;
+ typedef int32_t llama_seq_id;
+
+ enum llama_vocab_type {
+     LLAMA_VOCAB_TYPE_NONE = 0, // For models without vocab
+     LLAMA_VOCAB_TYPE_SPM  = 1, // LLaMA tokenizer based on byte-level BPE with byte fallback
+     LLAMA_VOCAB_TYPE_BPE  = 2, // GPT-2 tokenizer based on byte-level BPE
+     LLAMA_VOCAB_TYPE_WPM  = 3, // BERT tokenizer based on WordPiece
+     LLAMA_VOCAB_TYPE_UGM  = 4, // T5 tokenizer based on Unigram
+     LLAMA_VOCAB_TYPE_RWKV = 5, // RWKV tokenizer based on greedy tokenization
+ };
+
+ // pre-tokenization types
+ enum llama_vocab_pre_type {
+     LLAMA_VOCAB_PRE_TYPE_DEFAULT        = 0,
+     LLAMA_VOCAB_PRE_TYPE_LLAMA3         = 1,
+     LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM   = 2,
+     LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3,
+     LLAMA_VOCAB_PRE_TYPE_FALCON         = 4,
+     LLAMA_VOCAB_PRE_TYPE_MPT            = 5,
+     LLAMA_VOCAB_PRE_TYPE_STARCODER      = 6,
+     LLAMA_VOCAB_PRE_TYPE_GPT2           = 7,
+     LLAMA_VOCAB_PRE_TYPE_REFACT         = 8,
+     LLAMA_VOCAB_PRE_TYPE_COMMAND_R      = 9,
+     LLAMA_VOCAB_PRE_TYPE_STABLELM2      = 10,
+     LLAMA_VOCAB_PRE_TYPE_QWEN2          = 11,
+     LLAMA_VOCAB_PRE_TYPE_OLMO           = 12,
+     LLAMA_VOCAB_PRE_TYPE_DBRX           = 13,
+     LLAMA_VOCAB_PRE_TYPE_SMAUG          = 14,
+     LLAMA_VOCAB_PRE_TYPE_PORO           = 15,
+     LLAMA_VOCAB_PRE_TYPE_CHATGLM3       = 16,
+     LLAMA_VOCAB_PRE_TYPE_CHATGLM4       = 17,
+     LLAMA_VOCAB_PRE_TYPE_VIKING         = 18,
+     LLAMA_VOCAB_PRE_TYPE_JAIS           = 19,
+     LLAMA_VOCAB_PRE_TYPE_TEKKEN         = 20,
+     LLAMA_VOCAB_PRE_TYPE_SMOLLM         = 21,
+     LLAMA_VOCAB_PRE_TYPE_CODESHELL      = 22,
+     LLAMA_VOCAB_PRE_TYPE_BLOOM          = 23,
+     LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH   = 24,
+     LLAMA_VOCAB_PRE_TYPE_EXAONE         = 25,
+     LLAMA_VOCAB_PRE_TYPE_CHAMELEON      = 26,
+     LLAMA_VOCAB_PRE_TYPE_MINERVA        = 27,
+     LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM  = 28,
+     LLAMA_VOCAB_PRE_TYPE_GPT4O          = 29,
+     LLAMA_VOCAB_PRE_TYPE_SUPERBPE       = 30,
+     LLAMA_VOCAB_PRE_TYPE_TRILLION       = 31,
+     LLAMA_VOCAB_PRE_TYPE_BAILINGMOE     = 32,
+     LLAMA_VOCAB_PRE_TYPE_LLAMA4         = 33,
+     LLAMA_VOCAB_PRE_TYPE_PIXTRAL        = 34,
+     LLAMA_VOCAB_PRE_TYPE_SEED_CODER     = 35,
+ };
+
+ enum llama_rope_type {
+     LLAMA_ROPE_TYPE_NONE   = -1,
+     LLAMA_ROPE_TYPE_NORM   = 0,
+     LLAMA_ROPE_TYPE_NEOX   = GGML_ROPE_TYPE_NEOX,
+     LLAMA_ROPE_TYPE_MROPE  = GGML_ROPE_TYPE_MROPE,
+     LLAMA_ROPE_TYPE_VISION = GGML_ROPE_TYPE_VISION,
+ };
+
+ enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file
+     LLAMA_TOKEN_TYPE_UNDEFINED    = 0,
+     LLAMA_TOKEN_TYPE_NORMAL       = 1,
+     LLAMA_TOKEN_TYPE_UNKNOWN      = 2,
+     LLAMA_TOKEN_TYPE_CONTROL      = 3,
+     LLAMA_TOKEN_TYPE_USER_DEFINED = 4,
+     LLAMA_TOKEN_TYPE_UNUSED       = 5,
+     LLAMA_TOKEN_TYPE_BYTE         = 6,
+ };
+
+ enum llama_token_attr {
+     LLAMA_TOKEN_ATTR_UNDEFINED    = 0,
+     LLAMA_TOKEN_ATTR_UNKNOWN      = 1 << 0,
+     LLAMA_TOKEN_ATTR_UNUSED       = 1 << 1,
+     LLAMA_TOKEN_ATTR_NORMAL       = 1 << 2,
+     LLAMA_TOKEN_ATTR_CONTROL      = 1 << 3, // SPECIAL?
+     LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4,
+     LLAMA_TOKEN_ATTR_BYTE         = 1 << 5,
+     LLAMA_TOKEN_ATTR_NORMALIZED   = 1 << 6,
+     LLAMA_TOKEN_ATTR_LSTRIP       = 1 << 7,
+     LLAMA_TOKEN_ATTR_RSTRIP       = 1 << 8,
+     LLAMA_TOKEN_ATTR_SINGLE_WORD  = 1 << 9,
+ };
+
+ // model file types
+ enum llama_ftype {
+     LLAMA_FTYPE_ALL_F32        = 0,
+     LLAMA_FTYPE_MOSTLY_F16     = 1,  // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_Q4_0    = 2,  // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_Q4_1    = 3,  // except 1d tensors
+     // LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
+     // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed
+     // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed
+     LLAMA_FTYPE_MOSTLY_Q8_0    = 7,  // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_Q5_0    = 8,  // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_Q5_1    = 9,  // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_Q2_K    = 10, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_Q3_K_S  = 11, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_Q3_K_M  = 12, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_Q3_K_L  = 13, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_Q4_K_S  = 14, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_Q4_K_M  = 15, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_Q5_K_S  = 16, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_Q5_K_M  = 17, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_Q6_K    = 18, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_IQ2_XS  = 20, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_Q2_K_S  = 21, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_IQ3_XS  = 22, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_IQ1_S   = 24, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_IQ4_NL  = 25, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_IQ3_S   = 26, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_IQ3_M   = 27, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_IQ2_S   = 28, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_IQ2_M   = 29, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_IQ4_XS  = 30, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_IQ1_M   = 31, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_BF16    = 32, // except 1d tensors
+     //LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // removed from gguf files, use Q4_0 and runtime repack
+     //LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // removed from gguf files, use Q4_0 and runtime repack
+     //LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // removed from gguf files, use Q4_0 and runtime repack
+     LLAMA_FTYPE_MOSTLY_TQ1_0   = 36, // except 1d tensors
+     LLAMA_FTYPE_MOSTLY_TQ2_0   = 37, // except 1d tensors
+
+     LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
+ };
+
+ enum llama_rope_scaling_type {
+     LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1,
+     LLAMA_ROPE_SCALING_TYPE_NONE        = 0,
+     LLAMA_ROPE_SCALING_TYPE_LINEAR      = 1,
+     LLAMA_ROPE_SCALING_TYPE_YARN        = 2,
+     LLAMA_ROPE_SCALING_TYPE_LONGROPE    = 3,
+     LLAMA_ROPE_SCALING_TYPE_MAX_VALUE   = LLAMA_ROPE_SCALING_TYPE_LONGROPE,
+ };
+
+ enum llama_pooling_type {
+     LLAMA_POOLING_TYPE_UNSPECIFIED = -1,
+     LLAMA_POOLING_TYPE_NONE = 0,
+     LLAMA_POOLING_TYPE_MEAN = 1,
+     LLAMA_POOLING_TYPE_CLS  = 2,
+     LLAMA_POOLING_TYPE_LAST = 3,
+     LLAMA_POOLING_TYPE_RANK = 4, // used by reranking models to attach the classification head to the graph
+ };
+
+ enum llama_attention_type {
+     LLAMA_ATTENTION_TYPE_UNSPECIFIED = -1,
+     LLAMA_ATTENTION_TYPE_CAUSAL      = 0,
+     LLAMA_ATTENTION_TYPE_NON_CAUSAL  = 1,
+ };
+
+ enum llama_split_mode {
+     LLAMA_SPLIT_MODE_NONE  = 0, // single GPU
+     LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs
+     LLAMA_SPLIT_MODE_ROW   = 2, // split layers and KV across GPUs, use tensor parallelism if supported
+ };
+
+ // TODO: simplify (https://github.com/ggml-org/llama.cpp/pull/9294#pullrequestreview-2286561979)
+ typedef struct llama_token_data {
+     llama_token id;    // token id
+     float       logit; // log-odds of the token
+     float       p;     // probability of the token
+ } llama_token_data;
+
+ typedef struct llama_token_data_array {
+     // TODO: consider SoA
+     // NOTE: this pointer can be modified by the samplers
+     llama_token_data * data;
+     size_t size;
+     int64_t selected; // this is the index in the data array (i.e. not the token id)
+     bool sorted;
+ } llama_token_data_array;
+
+ typedef bool (*llama_progress_callback)(float progress, void * user_data);
+
+ // Input data for llama_decode
+ // A llama_batch object can contain input about one or many sequences
+ // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens
+ //
+ // - token  : the token ids of the input (used when embd is NULL)
+ // - embd   : token embeddings (i.e. float vector of size n_embd) (used when token is NULL)
+ // - pos    : the positions of the respective token in the sequence
+ //            (if set to NULL, the token position will be tracked automatically by llama_decode)
+ // - seq_id : the sequence to which the respective token belongs
+ //            (if set to NULL, the sequence ID will be assumed to be 0)
+ // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output
+ //            (if set to NULL, only the logits for the last token will be returned)
+ //
+ typedef struct llama_batch {
+     int32_t n_tokens;
+
+     llama_token  * token;
+     float        * embd;
+     llama_pos    * pos;
+     int32_t      * n_seq_id;
+     llama_seq_id ** seq_id;
+     int8_t       * logits; // TODO: rename this to "output"
+ } llama_batch;
+
+ enum llama_model_kv_override_type {
+     LLAMA_KV_OVERRIDE_TYPE_INT,
+     LLAMA_KV_OVERRIDE_TYPE_FLOAT,
+     LLAMA_KV_OVERRIDE_TYPE_BOOL,
+     LLAMA_KV_OVERRIDE_TYPE_STR,
+ };
+
+ struct llama_model_kv_override {
+     enum llama_model_kv_override_type tag;
+
+     char key[128];
+
+     union {
+         int64_t val_i64;
+         double  val_f64;
+         bool    val_bool;
+         char    val_str[128];
+     };
+ };
+
+ struct llama_model_tensor_buft_override {
+     const char * pattern;
+     ggml_backend_buffer_type_t buft;
+ };
+
+ struct llama_model_params {
+     // NULL-terminated list of devices to use for offloading (if NULL, all available devices are used)
+     ggml_backend_dev_t * devices;
+
+     // NULL-terminated list of buffer types to use for tensors that match a pattern
+     const struct llama_model_tensor_buft_override * tensor_buft_overrides;
+
+     int32_t n_gpu_layers; // number of layers to store in VRAM
+     enum llama_split_mode split_mode; // how to split the model across multiple GPUs
+
+     // the GPU that is used for the entire model when split_mode is LLAMA_SPLIT_MODE_NONE
+     int32_t main_gpu;
+
+     // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
+     const float * tensor_split;
+
+     // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
+     // If the provided progress_callback returns true, model loading continues.
+     // If it returns false, model loading is immediately aborted.
+     llama_progress_callback progress_callback;
+
+     // context pointer passed to the progress callback
+     void * progress_callback_user_data;
+
+     // override key-value pairs of the model meta data
+     const struct llama_model_kv_override * kv_overrides;
+
+     // Keep the booleans together to avoid misalignment during copy-by-value.
+     bool vocab_only;    // only load the vocabulary, no weights
+     bool use_mmap;      // use mmap if possible
+     bool use_mlock;     // force system to keep model in RAM
+     bool check_tensors; // validate model tensor data
+ };
+
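// Editor's sketch (not part of the diff): configuring llama_model_params with a
// progress callback, using only the types and fields declared above. Returning
// false from the callback aborts loading; the layer count here is illustrative.
#include "llama.h"
#include <stdio.h>
#include <stdbool.h>

static bool on_progress(float progress, void * user_data) {
    (void) user_data;
    fprintf(stderr, "\rloading: %3.0f%%", progress * 100.0f);
    return true; // keep loading; return false to abort
}

static struct llama_model_params make_params(void) {
    struct llama_model_params mp = llama_model_default_params();
    mp.n_gpu_layers      = 99;   // offload as many layers as fit in VRAM
    mp.use_mmap          = true; // map the file instead of reading it
    mp.progress_callback = on_progress;
    return mp;
}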
+ // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
+ // https://github.com/ggml-org/llama.cpp/pull/7544
+ struct llama_context_params {
+     uint32_t n_ctx;           // text context, 0 = from model
+     uint32_t n_batch;         // logical maximum batch size that can be submitted to llama_decode
+     uint32_t n_ubatch;        // physical maximum batch size
+     uint32_t n_seq_max;       // max number of sequences (i.e. distinct states for recurrent models)
+     int32_t  n_threads;       // number of threads to use for generation
+     int32_t  n_threads_batch; // number of threads to use for batch processing
+
+     enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
+     enum llama_pooling_type      pooling_type;      // whether to pool (sum) embedding results by sequence id
+     enum llama_attention_type    attention_type;    // attention type to use for embeddings
+
+     // ref: https://github.com/ggml-org/llama.cpp/pull/2054
+     float    rope_freq_base;   // RoPE base frequency, 0 = from model
+     float    rope_freq_scale;  // RoPE frequency scaling factor, 0 = from model
+     float    yarn_ext_factor;  // YaRN extrapolation mix factor, negative = from model
+     float    yarn_attn_factor; // YaRN magnitude scaling factor
+     float    yarn_beta_fast;   // YaRN low correction dim
+     float    yarn_beta_slow;   // YaRN high correction dim
+     uint32_t yarn_orig_ctx;    // YaRN original context size
+     float    defrag_thold;     // defragment the KV cache if holes/size > thold, <= 0 disabled (default)
+
+     ggml_backend_sched_eval_callback cb_eval;
+     void * cb_eval_user_data;
+
+     enum ggml_type type_k; // data type for K cache [EXPERIMENTAL]
+     enum ggml_type type_v; // data type for V cache [EXPERIMENTAL]
+
+     // Abort callback
+     // if it returns true, execution of llama_decode() will be aborted
+     // currently works only with CPU execution
+     ggml_abort_callback abort_callback;
+     void *              abort_callback_data;
+
+     // Keep the booleans together and at the end of the struct to avoid misalignment during copy-by-value.
+     bool embeddings;  // if true, extract embeddings (together with logits)
+     bool offload_kqv; // offload the KQV ops (including the KV cache) to GPU
+     bool flash_attn;  // use flash attention [EXPERIMENTAL]
+     bool no_perf;     // skip measuring performance timings
+     bool op_offload;  // offload host tensor operations to device
+     bool swa_full;    // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
+ };
+
+ // model quantization parameters
+ typedef struct llama_model_quantize_params {
+     int32_t nthread;                     // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
+     enum llama_ftype ftype;              // quantize to this llama_ftype
+     enum ggml_type output_tensor_type;   // output tensor type
+     enum ggml_type token_embedding_type; // token embeddings tensor type
+     bool allow_requantize;               // allow quantizing non-f32/f16 tensors
+     bool quantize_output_tensor;         // quantize output.weight
+     bool only_copy;                      // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
+     bool pure;                           // quantize all tensors to the default type
+     bool keep_split;                     // quantize to the same number of shards
+     void * imatrix;                      // pointer to importance matrix data
+     void * kv_overrides;                 // pointer to vector containing overrides
+     void * tensor_types;                 // pointer to vector containing tensor types
+ } llama_model_quantize_params;
+
+ typedef struct llama_logit_bias {
+     llama_token token;
+     float bias;
+ } llama_logit_bias;
+
+ typedef struct llama_sampler_chain_params {
+     bool no_perf; // whether to skip measuring performance timings
+ } llama_sampler_chain_params;
+
+ // used in chat template
+ typedef struct llama_chat_message {
+     const char * role;
+     const char * content;
+ } llama_chat_message;
+
+ // lora adapter
+ struct llama_adapter_lora;
+
+ // Helpers for getting default parameters
+ // TODO: update API to start accepting pointers to params structs (https://github.com/ggml-org/llama.cpp/discussions/9172)
+ LLAMA_API struct llama_model_params          llama_model_default_params(void);
+ LLAMA_API struct llama_context_params        llama_context_default_params(void);
+ LLAMA_API struct llama_sampler_chain_params  llama_sampler_chain_default_params(void);
+ LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);
+
+ // Initialize the llama + ggml backend
+ // If numa is true, use NUMA optimizations
+ // Call once at the start of the program
+ LLAMA_API void llama_backend_init(void);
+
+ // Call once at the end of the program - currently only used for MPI
+ LLAMA_API void llama_backend_free(void);
+
+ // optional:
+ LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa);
+
+ // Optional: an auto threadpool gets created in ggml if not passed explicitly
+ LLAMA_API void llama_attach_threadpool(
+         struct llama_context * ctx,
+         ggml_threadpool_t threadpool,
+         ggml_threadpool_t threadpool_batch);
+
+ LLAMA_API void llama_detach_threadpool(struct llama_context * ctx);
+
+ DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file(
+         const char * path_model,
+         struct llama_model_params params),
+         "use llama_model_load_from_file instead");
+
+ // Load the model from a file
+ // If the file is split into multiple parts, the file name must follow this pattern: <name>-%05d-of-%05d.gguf
+ // If the split file name does not follow this pattern, use llama_model_load_from_splits
+ LLAMA_API struct llama_model * llama_model_load_from_file(
+         const char * path_model,
+         struct llama_model_params params);
+
+ // Load the model from multiple splits (supports custom naming schemes)
+ // The paths must be in the correct order
+ LLAMA_API struct llama_model * llama_model_load_from_splits(
+         const char ** paths,
+         size_t n_paths,
+         struct llama_model_params params);
+
+ LLAMA_API void llama_model_save_to_file(
+         const struct llama_model * model,
+         const char * path_model);
+
+ DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model),
+         "use llama_model_free instead");
+
+ LLAMA_API void llama_model_free(struct llama_model * model);
+
+ LLAMA_API struct llama_context * llama_init_from_model(
+         struct llama_model * model,
+         struct llama_context_params params);
+
+ DEPRECATED(LLAMA_API struct llama_context * llama_new_context_with_model(
+         struct llama_model * model,
+         struct llama_context_params params),
+         "use llama_init_from_model instead");
+
+ // Frees all allocated memory
+ LLAMA_API void llama_free(struct llama_context * ctx);
+
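// Editor's sketch (not part of the diff): the load/init/teardown sequence using
// the non-deprecated entry points declared above. "model.gguf" is a placeholder.
#include "llama.h"

int run(void) {
    llama_backend_init();

    struct llama_model * model = llama_model_load_from_file(
        "model.gguf", llama_model_default_params());
    if (model == NULL) return 1;

    struct llama_context * ctx = llama_init_from_model(
        model, llama_context_default_params());
    if (ctx == NULL) { llama_model_free(model); return 1; }

    // ... use the context ...

    llama_free(ctx);          // free the context before the model it came from
    llama_model_free(model);
    llama_backend_free();
    return 0;
}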
+ LLAMA_API int64_t llama_time_us(void);
+
+ LLAMA_API size_t llama_max_devices(void);
+ LLAMA_API size_t llama_max_parallel_sequences(void);
+
+ LLAMA_API bool llama_supports_mmap       (void);
+ LLAMA_API bool llama_supports_mlock      (void);
+ LLAMA_API bool llama_supports_gpu_offload(void);
+ LLAMA_API bool llama_supports_rpc        (void);
+
+ LLAMA_API uint32_t llama_n_ctx      (const struct llama_context * ctx);
+ LLAMA_API uint32_t llama_n_batch    (const struct llama_context * ctx);
+ LLAMA_API uint32_t llama_n_ubatch   (const struct llama_context * ctx);
+ LLAMA_API uint32_t llama_n_seq_max  (const struct llama_context * ctx);
+
+ DEPRECATED(LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model), "use llama_model_n_ctx_train instead");
+ DEPRECATED(LLAMA_API int32_t llama_n_embd     (const struct llama_model * model), "use llama_model_n_embd instead");
+ DEPRECATED(LLAMA_API int32_t llama_n_layer    (const struct llama_model * model), "use llama_model_n_layer instead");
+ DEPRECATED(LLAMA_API int32_t llama_n_head     (const struct llama_model * model), "use llama_model_n_head instead");
+
+ DEPRECATED(LLAMA_API int32_t llama_n_vocab    (const struct llama_vocab * vocab), "use llama_vocab_n_tokens instead");
+
+ LLAMA_API const struct llama_model * llama_get_model   (const struct llama_context * ctx);
+ LLAMA_API struct llama_kv_cache * llama_get_kv_self    (      struct llama_context * ctx);
+ LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); // TODO: rename to llama_get_pooling_type
+
+ LLAMA_API const struct llama_vocab * llama_model_get_vocab(const struct llama_model * model);
+ LLAMA_API enum llama_rope_type llama_model_rope_type(const struct llama_model * model);
+
+ LLAMA_API int32_t llama_model_n_ctx_train(const struct llama_model * model);
+ LLAMA_API int32_t llama_model_n_embd     (const struct llama_model * model);
+ LLAMA_API int32_t llama_model_n_layer    (const struct llama_model * model);
+ LLAMA_API int32_t llama_model_n_head    (const struct llama_model * model);
+ LLAMA_API int32_t llama_model_n_head_kv (const struct llama_model * model);
+
+ // Get the model's RoPE frequency scaling factor
+ LLAMA_API float llama_model_rope_freq_scale_train(const struct llama_model * model);
+
+ LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_vocab * vocab);
+
+ LLAMA_API int32_t llama_vocab_n_tokens(const struct llama_vocab * vocab);
+
+ // Functions to access the model's GGUF metadata scalar values
+ // - The functions return the length of the string on success, or -1 on failure
+ // - The output string is always null-terminated and cleared on failure
+ // - When retrieving a string, an extra byte must be allocated to account for the null terminator
+ // - GGUF array values are not supported by these functions
+
+ // Get metadata value as a string by key name
+ LLAMA_API int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size);
+
+ // Get the number of metadata key/value pairs
+ LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model);
+
+ // Get metadata key name by index
+ LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
+
+ // Get metadata value as a string by index
+ LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
+
+ // Get a string describing the model type
+ LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
+
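// Editor's sketch (not part of the diff): dumping all GGUF metadata key/value
// pairs via the index-based accessors above. The fixed-size buffers are an
// assumption; values longer than the buffer are truncated (and null-terminated).
#include "llama.h"
#include <stdio.h>

void dump_meta(const struct llama_model * model) {
    char key[256], val[256];
    const int32_t n = llama_model_meta_count(model);
    for (int32_t i = 0; i < n; i++) {
        if (llama_model_meta_key_by_index    (model, i, key, sizeof(key)) < 0) continue;
        if (llama_model_meta_val_str_by_index(model, i, val, sizeof(val)) < 0) continue;
        printf("%s = %s\n", key, val);
    }
}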
+ // Returns the total size of all the tensors in the model in bytes
+ LLAMA_API uint64_t llama_model_size(const struct llama_model * model);
+
+ // Get the default chat template. Returns nullptr if not available
+ // If name is NULL, returns the default chat template
+ LLAMA_API const char * llama_model_chat_template(const struct llama_model * model, const char * name);
+
+ // Returns the total number of parameters in the model
+ LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
+
+ // Returns true if the model contains an encoder that requires llama_encode() call
+ LLAMA_API bool llama_model_has_encoder(const struct llama_model * model);
+
+ // Returns true if the model contains a decoder that requires llama_decode() call
+ LLAMA_API bool llama_model_has_decoder(const struct llama_model * model);
+
+ // For encoder-decoder models, this function returns the id of the token that must be provided
+ // to the decoder to start generating the output sequence. For other models, it returns -1.
+ LLAMA_API llama_token llama_model_decoder_start_token(const struct llama_model * model);
+
+ // Returns true if the model is recurrent (like Mamba, RWKV, etc.)
+ LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model);
+
+ // Returns 0 on success
+ LLAMA_API uint32_t llama_model_quantize(
+         const char * fname_inp,
+         const char * fname_out,
+         const llama_model_quantize_params * params);
+
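// Editor's sketch (not part of the diff): quantizing a model file to Q4_K_M
// starting from the default parameter set declared above. File names are
// placeholders.
#include "llama.h"

int quantize_q4km(void) {
    llama_model_quantize_params qp = llama_model_quantize_default_params();
    qp.ftype   = LLAMA_FTYPE_MOSTLY_Q4_K_M;
    qp.nthread = 0; // <= 0: use std::thread::hardware_concurrency()
    return (int) llama_model_quantize("model-f16.gguf", "model-q4_k_m.gguf", &qp);
}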
+ //
+ // Adapters
+ //
+
+ // Load a LoRA adapter from file
+ LLAMA_API struct llama_adapter_lora * llama_adapter_lora_init(
+         struct llama_model * model,
+         const char * path_lora);
+
+ // Manually free a LoRA adapter
+ // Note: loaded adapters will be freed when the associated model is deleted
+ LLAMA_API void llama_adapter_lora_free(struct llama_adapter_lora * adapter);
+
+ // The following functions operate on a llama_context, hence the naming: llama_verb_...
+
+ // Add a loaded LoRA adapter to the given context
+ // This will not modify the model's weights
+ LLAMA_API int32_t llama_set_adapter_lora(
+         struct llama_context * ctx,
+         struct llama_adapter_lora * adapter,
+         float scale);
+
+ // Remove a specific LoRA adapter from the given context
+ // Return -1 if the adapter is not present in the context
+ LLAMA_API int32_t llama_rm_adapter_lora(
+         struct llama_context * ctx,
+         struct llama_adapter_lora * adapter);
+
+ // Remove all LoRA adapters from the given context
+ LLAMA_API void llama_clear_adapter_lora(struct llama_context * ctx);
+
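// Editor's sketch (not part of the diff): loading a LoRA adapter and attaching
// it to a context at half strength. Per the note above the adapter is freed
// with the model; the path is a placeholder.
#include "llama.h"

int attach_lora(struct llama_model * model, struct llama_context * ctx) {
    struct llama_adapter_lora * lora = llama_adapter_lora_init(model, "adapter.gguf");
    if (lora == NULL) return -1;
    return (int) llama_set_adapter_lora(ctx, lora, 0.5f); // scale 0.5
}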
+ // Apply a loaded control vector to a llama_context, or if data is NULL, clear
+ // the currently loaded vector.
+ // n_embd should be the size of a single layer's control, and data should point
+ // to an n_embd x n_layers buffer starting from layer 1.
+ // il_start and il_end are the layer range the vector should apply to (both inclusive)
+ // See llama_control_vector_load in common to load a control vector.
+ LLAMA_API int32_t llama_apply_adapter_cvec(
+         struct llama_context * ctx,
+         const float * data,
+         size_t len,
+         int32_t n_embd,
+         int32_t il_start,
+         int32_t il_end);
+
+ //
+ // KV cache
+ //
+
+ // Returns the number of tokens in the KV cache (slow, use only for debug)
+ // If a KV cell has multiple sequences assigned to it, it will be counted multiple times
+ DEPRECATED(LLAMA_API int32_t llama_kv_self_n_tokens(const struct llama_context * ctx),
+         "Use llama_kv_self_seq_pos_max() and llama_kv_self_seq_pos_min() instead (https://github.com/ggml-org/llama.cpp/issues/13793)");
+
+ // Returns the number of used KV cells (i.e. cells that have at least one sequence assigned to them)
+ DEPRECATED(LLAMA_API int32_t llama_kv_self_used_cells(const struct llama_context * ctx),
+         "Use llama_kv_self_seq_pos_max() and llama_kv_self_seq_pos_min() instead (https://github.com/ggml-org/llama.cpp/issues/13793)");
+
+ // Clear the KV cache - both cell info is erased and KV data is zeroed
+ LLAMA_API void llama_kv_self_clear(
+         struct llama_context * ctx);
+
+ // Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
+ // Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails
+ // seq_id < 0 : match any sequence
+ // p0 < 0     : [0,  p1]
+ // p1 < 0     : [p0, inf)
+ LLAMA_API bool llama_kv_self_seq_rm(
+         struct llama_context * ctx,
+         llama_seq_id seq_id,
+         llama_pos p0,
+         llama_pos p1);
+
+ // Copy all tokens that belong to the specified sequence to another sequence
+ // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
+ // p0 < 0 : [0,  p1]
+ // p1 < 0 : [p0, inf)
+ LLAMA_API void llama_kv_self_seq_cp(
+         struct llama_context * ctx,
+         llama_seq_id seq_id_src,
+         llama_seq_id seq_id_dst,
+         llama_pos p0,
+         llama_pos p1);
+
+ // Removes all tokens that do not belong to the specified sequence
+ LLAMA_API void llama_kv_self_seq_keep(
+         struct llama_context * ctx,
+         llama_seq_id seq_id);
+
+ // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
+ // If the KV cache is RoPEd, the KV data is updated accordingly:
+ //   - lazily on next llama_decode()
+ //   - explicitly with llama_kv_self_update()
+ // p0 < 0 : [0,  p1]
+ // p1 < 0 : [p0, inf)
+ LLAMA_API void llama_kv_self_seq_add(
+         struct llama_context * ctx,
+         llama_seq_id seq_id,
+         llama_pos p0,
+         llama_pos p1,
+         llama_pos delta);
+
+ // Integer division of the positions by factor of `d > 1`
+ // If the KV cache is RoPEd, the KV data is updated accordingly:
+ //   - lazily on next llama_decode()
+ //   - explicitly with llama_kv_self_update()
+ // p0 < 0 : [0,  p1]
+ // p1 < 0 : [p0, inf)
+ LLAMA_API void llama_kv_self_seq_div(
+         struct llama_context * ctx,
+         llama_seq_id seq_id,
+         llama_pos p0,
+         llama_pos p1,
+         int d);
+
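// Editor's sketch (not part of the diff): dropping the oldest tokens of
// sequence 0 and shifting the remainder left, using the p0/p1 half-open
// conventions documented above. Requires a cache that supports shifting.
#include "llama.h"

void drop_prefix(struct llama_context * ctx, llama_pos n_keep, llama_pos n_drop) {
    if (!llama_kv_self_can_shift(ctx)) return;
    // remove positions [n_keep, n_keep + n_drop) from sequence 0
    llama_kv_self_seq_rm (ctx, 0, n_keep, n_keep + n_drop);
    // shift the surviving tail [n_keep + n_drop, inf) back by n_drop
    llama_kv_self_seq_add(ctx, 0, n_keep + n_drop, -1, -n_drop);
}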
+ // Returns the smallest position present in the KV cache for the specified sequence
+ // This is typically non-zero only for SWA caches
+ // Return -1 if the sequence is empty
+ LLAMA_API llama_pos llama_kv_self_seq_pos_min(
+         struct llama_context * ctx,
+         llama_seq_id seq_id);
+
+ // Returns the largest position present in the KV cache for the specified sequence
+ // Return -1 if the sequence is empty
+ LLAMA_API llama_pos llama_kv_self_seq_pos_max(
+         struct llama_context * ctx,
+         llama_seq_id seq_id);
+
+ // Defragment the KV cache
+ // This will be applied:
+ //   - lazily on next llama_decode()
+ //   - explicitly with llama_kv_self_update()
+ LLAMA_API void llama_kv_self_defrag(struct llama_context * ctx);
+
+ // Check if the context supports KV cache shifting
+ LLAMA_API bool llama_kv_self_can_shift(const struct llama_context * ctx);
+
+ // Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
+ LLAMA_API void llama_kv_self_update(struct llama_context * ctx);
+
+ //
+ // State / sessions
+ //
+
+ // Returns the *actual* size in bytes of the state
+ // (logits, embedding and kv_cache)
+ // Only use when saving the state, not when restoring it, otherwise the size may be too small.
+ LLAMA_API size_t llama_state_get_size(struct llama_context * ctx);
+ LLAMA_API DEPRECATED(size_t llama_get_state_size(struct llama_context * ctx),
+     "use llama_state_get_size instead");
+
+ // Copies the state to the specified destination address.
+ // Destination needs to have allocated enough memory.
+ // Returns the number of bytes copied
+ LLAMA_API size_t llama_state_get_data(
+         struct llama_context * ctx,
+         uint8_t * dst,
+         size_t size);
+ LLAMA_API DEPRECATED(size_t llama_copy_state_data(
+         struct llama_context * ctx,
+         uint8_t * dst),
+     "use llama_state_get_data instead");
+
+ // Set the state reading from the specified address
+ // Returns the number of bytes read
+ LLAMA_API size_t llama_state_set_data(
+         struct llama_context * ctx,
+         const uint8_t * src,
+         size_t size);
+ LLAMA_API DEPRECATED(size_t llama_set_state_data(
+         struct llama_context * ctx,
+         const uint8_t * src),
+     "use llama_state_set_data instead");
+
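// Editor's sketch (not part of the diff): snapshotting the full context state
// into a heap buffer and restoring it later, following the sizing rule above
// (query the size when saving, not when restoring).
#include "llama.h"
#include <stdlib.h>

uint8_t * snapshot(struct llama_context * ctx, size_t * n_out) {
    const size_t n = llama_state_get_size(ctx);
    uint8_t * buf = malloc(n);
    if (buf) *n_out = llama_state_get_data(ctx, buf, n); // bytes actually copied
    return buf; // caller frees
}

void restore(struct llama_context * ctx, const uint8_t * buf, size_t n) {
    llama_state_set_data(ctx, buf, n);
}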
+ // Save/load session file
+ LLAMA_API bool llama_state_load_file(
+         struct llama_context * ctx,
+         const char * path_session,
+         llama_token * tokens_out,
+         size_t n_token_capacity,
+         size_t * n_token_count_out);
+ LLAMA_API DEPRECATED(bool llama_load_session_file(
+         struct llama_context * ctx,
+         const char * path_session,
+         llama_token * tokens_out,
+         size_t n_token_capacity,
+         size_t * n_token_count_out),
+     "use llama_state_load_file instead");
+
+ LLAMA_API bool llama_state_save_file(
+         struct llama_context * ctx,
+         const char * path_session,
+         const llama_token * tokens,
+         size_t n_token_count);
+ LLAMA_API DEPRECATED(bool llama_save_session_file(
+         struct llama_context * ctx,
+         const char * path_session,
+         const llama_token * tokens,
+         size_t n_token_count),
+     "use llama_state_save_file instead");
+
+ // Get the exact size needed to copy the KV cache of a single sequence
+ LLAMA_API size_t llama_state_seq_get_size(
+         struct llama_context * ctx,
+         llama_seq_id seq_id);
+
+ // Copy the KV cache of a single sequence into the specified buffer
+ LLAMA_API size_t llama_state_seq_get_data(
+         struct llama_context * ctx,
+         uint8_t * dst,
+         size_t size,
+         llama_seq_id seq_id);
+
+ // Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence
+ // Returns:
+ //  - Positive: Ok
+ //  - Zero: Failed to load
+ LLAMA_API size_t llama_state_seq_set_data(
+         struct llama_context * ctx,
+         const uint8_t * src,
+         size_t size,
+         llama_seq_id dest_seq_id);
+
+ LLAMA_API size_t llama_state_seq_save_file(
+         struct llama_context * ctx,
+         const char * filepath,
+         llama_seq_id seq_id,
+         const llama_token * tokens,
+         size_t n_token_count);
+
+ LLAMA_API size_t llama_state_seq_load_file(
+         struct llama_context * ctx,
+         const char * filepath,
+         llama_seq_id dest_seq_id,
+         llama_token * tokens_out,
+         size_t n_token_capacity,
+         size_t * n_token_count_out);
+
+ //
+ // Decoding
+ //
+
+ // Returns a batch for a single sequence of tokens
+ // The sequence ID will be fixed to 0
+ // The position of the tokens will be tracked automatically by llama_decode
+ //
+ // NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it
+ //
+ LLAMA_API struct llama_batch llama_batch_get_one(
+         llama_token * tokens,
+         int32_t n_tokens);
+
+ // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
+ // Each token can be assigned up to n_seq_max sequence ids
+ // The batch has to be freed with llama_batch_free()
+ // If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
+ // Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
+ // The rest of the llama_batch members are allocated with size n_tokens
+ // All members are left uninitialized
+ LLAMA_API struct llama_batch llama_batch_init(
+         int32_t n_tokens,
+         int32_t embd,
+         int32_t n_seq_max);
+
+ // Frees a batch of tokens allocated with llama_batch_init()
+ LLAMA_API void llama_batch_free(struct llama_batch batch);
+
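// Editor's sketch (not part of the diff): allocating a batch, filling it for a
// single sequence, and returning it for the caller to free. All members are
// left uninitialized by llama_batch_init, so every field used is set explicitly.
#include "llama.h"

struct llama_batch make_prompt_batch(const llama_token * toks, int32_t n) {
    struct llama_batch batch = llama_batch_init(n, /*embd=*/0, /*n_seq_max=*/1);
    for (int32_t i = 0; i < n; i++) {
        batch.token   [i]    = toks[i];
        batch.pos     [i]    = i;
        batch.n_seq_id[i]    = 1;
        batch.seq_id  [i][0] = 0;           // single sequence, id 0
        batch.logits  [i]    = (i == n - 1); // only the last token's logits
    }
    batch.n_tokens = n;
    return batch; // caller calls llama_batch_free() when done
}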
+ // Process a batch of tokens.
+ // In contrast to llama_decode() - this call does not use KV cache.
+ // For encoder-decoder contexts, processes the batch using the encoder.
+ // Can store the encoder output internally for later use by the decoder's cross-attention layers.
+ //   0 - success
+ // < 0 - error. the KV cache state is restored to the state before this call
+ LLAMA_API int32_t llama_encode(
+         struct llama_context * ctx,
+         struct llama_batch batch);
+
+ // Process a batch of tokens.
+ // Requires KV cache.
+ // For encoder-decoder contexts, processes the batch using the decoder.
+ // A positive return value does not indicate a fatal error, but rather a warning.
+ // Upon non-zero return values, the KV cache state is restored to the state before this call
+ //    0 - success
+ //    1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
+ //    2 - aborted
+ //   -1 - invalid input batch
+ // < -1 - error
+ LLAMA_API int32_t llama_decode(
+         struct llama_context * ctx,
+         struct llama_batch batch);
+
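// Editor's sketch (not part of the diff): handling the llama_decode() return
// codes enumerated above - retry guidance for 1, hard failure for < 0.
#include "llama.h"
#include <stdio.h>

int decode_checked(struct llama_context * ctx, struct llama_batch batch) {
    const int32_t ret = llama_decode(ctx, batch);
    if (ret == 1) {
        fprintf(stderr, "no KV slot: shrink the batch or grow n_ctx\n");
    } else if (ret < 0) {
        fprintf(stderr, "llama_decode failed: %d\n", ret);
    }
    return (int) ret; // 0 on success, 2 if aborted via the abort callback
}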
+ // Set the number of threads used for decoding
+ // n_threads is the number of threads used for generation (single token)
+ // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
+ LLAMA_API void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch);
+
+ // Get the number of threads used for generation of a single token.
+ LLAMA_API int32_t llama_n_threads(struct llama_context * ctx);
+
+ // Get the number of threads used for prompt and batch processing (multiple tokens).
+ LLAMA_API int32_t llama_n_threads_batch(struct llama_context * ctx);
+
+ // Set whether the model is in embeddings mode or not
+ // If true, embeddings will be returned but logits will not
+ LLAMA_API void llama_set_embeddings(struct llama_context * ctx, bool embeddings);
+
+ // Set whether to use causal attention or not
+ // If set to true, the model will only attend to the past tokens
+ LLAMA_API void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn);
+
+ // Set whether the model is in warmup mode or not
+ // If true, all model tensors are activated during llama_decode() to load and cache their weights.
+ LLAMA_API void llama_set_warmup(struct llama_context * ctx, bool warmup);
+
+ // Set abort callback
+ LLAMA_API void llama_set_abort_callback(struct llama_context * ctx, ggml_abort_callback abort_callback, void * abort_callback_data);
+
+ // Wait until all computations are finished
+ // This is automatically done when using one of the functions below to obtain the computation results
+ // and is not necessary to call it explicitly in most cases
+ LLAMA_API void llama_synchronize(struct llama_context * ctx);
+
+ // Token logits obtained from the last call to llama_decode()
+ // The logits for which llama_batch.logits[i] != 0 are stored contiguously
+ // in the order they have appeared in the batch.
+ // Rows: number of tokens for which llama_batch.logits[i] != 0
+ // Cols: n_vocab
+ LLAMA_API float * llama_get_logits(struct llama_context * ctx);
+
+ // Logits for the ith token. For positive indices, equivalent to:
+ // llama_get_logits(ctx) + ctx->output_ids[i]*n_vocab
+ // Negative indices can be used to access logits in reverse order, -1 is the last logit.
+ // returns NULL for invalid ids.
+ LLAMA_API float * llama_get_logits_ith(struct llama_context * ctx, int32_t i);
+
+ // Get all output token embeddings.
+ // when pooling_type == LLAMA_POOLING_TYPE_NONE or when using a generative model,
+ // the embeddings for which llama_batch.logits[i] != 0 are stored contiguously
+ // in the order they have appeared in the batch.
+ // shape: [n_outputs*n_embd]
+ // Otherwise, returns NULL.
+ LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
+
+ // Get the embeddings for the ith token. For positive indices, equivalent to:
+ // llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd
+ // Negative indices can be used to access embeddings in reverse order, -1 is the last embedding.
+ // shape: [n_embd] (1-dimensional)
+ // returns NULL for invalid ids.
+ LLAMA_API float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i);
+
+ // Get the embeddings for a sequence id
+ // Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE
+ // when pooling_type == LLAMA_POOLING_TYPE_RANK, returns float[1] with the rank of the sequence
+ // otherwise: float[n_embd] (1-dimensional)
+ LLAMA_API float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id);
+
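// Editor's sketch (not part of the diff): fetching the pooled embedding for
// sequence 0. Per the notes above this returns NULL unless the context was
// created in embeddings mode with a pooling type other than
// LLAMA_POOLING_TYPE_NONE.
#include "llama.h"

const float * pooled_embedding(struct llama_context * ctx, int32_t * n_embd_out) {
    const struct llama_model * model = llama_get_model(ctx);
    *n_embd_out = llama_model_n_embd(model);
    return llama_get_embeddings_seq(ctx, 0); // float[n_embd], or NULL
}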
919
+ //
920
+ // Vocab
921
+ //
922
+
923
+ LLAMA_API const char * llama_vocab_get_text(const struct llama_vocab * vocab, llama_token token);
924
+
925
+ LLAMA_API float llama_vocab_get_score(const struct llama_vocab * vocab, llama_token token);
926
+
927
+ LLAMA_API enum llama_token_attr llama_vocab_get_attr(const struct llama_vocab * vocab, llama_token token);
928
+
929
+ // Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.)
930
+ LLAMA_API bool llama_vocab_is_eog(const struct llama_vocab * vocab, llama_token token);
931
+
932
+ // Identify if Token Id is a control token or a render-able token
933
+ LLAMA_API bool llama_vocab_is_control(const struct llama_vocab * vocab, llama_token token);
934
+
935
+ // Special tokens
936
+ LLAMA_API llama_token llama_vocab_bos(const struct llama_vocab * vocab); // beginning-of-sentence
937
+ LLAMA_API llama_token llama_vocab_eos(const struct llama_vocab * vocab); // end-of-sentence
938
+ LLAMA_API llama_token llama_vocab_eot(const struct llama_vocab * vocab); // end-of-turn
939
+ LLAMA_API llama_token llama_vocab_sep(const struct llama_vocab * vocab); // sentence separator
940
+ LLAMA_API llama_token llama_vocab_nl (const struct llama_vocab * vocab); // next-line
941
+ LLAMA_API llama_token llama_vocab_pad(const struct llama_vocab * vocab); // padding
942
+
943
+ LLAMA_API bool llama_vocab_get_add_bos(const struct llama_vocab * vocab);
944
+ LLAMA_API bool llama_vocab_get_add_eos(const struct llama_vocab * vocab);
945
+
946
+ LLAMA_API llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab);
947
+ LLAMA_API llama_token llama_vocab_fim_suf(const struct llama_vocab * vocab);
948
+ LLAMA_API llama_token llama_vocab_fim_mid(const struct llama_vocab * vocab);
949
+ LLAMA_API llama_token llama_vocab_fim_pad(const struct llama_vocab * vocab);
950
+ LLAMA_API llama_token llama_vocab_fim_rep(const struct llama_vocab * vocab);
951
+ LLAMA_API llama_token llama_vocab_fim_sep(const struct llama_vocab * vocab);
952
+
953
+ DEPRECATED(LLAMA_API const char * llama_token_get_text(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_text instead");
954
+ DEPRECATED(LLAMA_API float llama_token_get_score(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_score instead");
955
+ DEPRECATED(LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_attr instead");
956
+ DEPRECATED(LLAMA_API bool llama_token_is_eog(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_eog instead");
957
+ DEPRECATED(LLAMA_API bool llama_token_is_control(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_control instead");
958
+ DEPRECATED(LLAMA_API llama_token llama_token_bos(const struct llama_vocab * vocab), "use llama_vocab_bos instead");
959
+ DEPRECATED(LLAMA_API llama_token llama_token_eos(const struct llama_vocab * vocab), "use llama_vocab_eos instead");
960
+ DEPRECATED(LLAMA_API llama_token llama_token_eot(const struct llama_vocab * vocab), "use llama_vocab_eot instead");
961
+ DEPRECATED(LLAMA_API llama_token llama_token_cls(const struct llama_vocab * vocab), "use llama_vocab_cls instead");
962
+ DEPRECATED(LLAMA_API llama_token llama_token_sep(const struct llama_vocab * vocab), "use llama_vocab_sep instead");
963
+ DEPRECATED(LLAMA_API llama_token llama_token_nl (const struct llama_vocab * vocab), "use llama_vocab_nl instead");
964
+ DEPRECATED(LLAMA_API llama_token llama_token_pad(const struct llama_vocab * vocab), "use llama_vocab_pad instead");
965
+ DEPRECATED(LLAMA_API bool llama_add_bos_token(const struct llama_vocab * vocab), "use llama_vocab_get_add_bos instead");
966
+ DEPRECATED(LLAMA_API bool llama_add_eos_token(const struct llama_vocab * vocab), "use llama_vocab_get_add_eos instead");
967
+ DEPRECATED(LLAMA_API llama_token llama_token_fim_pre(const struct llama_vocab * vocab), "use llama_vocab_fim_pre instead");
968
+ DEPRECATED(LLAMA_API llama_token llama_token_fim_suf(const struct llama_vocab * vocab), "use llama_vocab_fim_suf instead");
969
+ DEPRECATED(LLAMA_API llama_token llama_token_fim_mid(const struct llama_vocab * vocab), "use llama_vocab_fim_mid instead");
970
+ DEPRECATED(LLAMA_API llama_token llama_token_fim_pad(const struct llama_vocab * vocab), "use llama_vocab_fim_pad instead");
971
+ DEPRECATED(LLAMA_API llama_token llama_token_fim_rep(const struct llama_vocab * vocab), "use llama_vocab_fim_rep instead");
972
+ DEPRECATED(LLAMA_API llama_token llama_token_fim_sep(const struct llama_vocab * vocab), "use llama_vocab_fim_sep instead");
973
+
974
+ // CLS is equivalent to BOS
975
+ DEPRECATED(LLAMA_API llama_token llama_vocab_cls(const struct llama_vocab * vocab), // classification
976
+ "use llama_vocab_bos instead");
977
+
978
+ //
979
+ // Tokenization
980
+ //
981
+ // The API is thread-safe.
982
+ //
983
+
984
+ /// @details Convert the provided text into tokens.
985
+ /// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
986
+ /// @return Returns the number of tokens on success, no more than n_tokens_max
987
+ /// @return Returns a negative number on failure - the number of tokens that would have been returned
988
+ /// @param add_special Allow to add BOS and EOS tokens if model is configured to do so.
989
+ /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated
990
+ /// as plaintext. Does not insert a leading space.
991
+ LLAMA_API int32_t llama_tokenize(
992
+ const struct llama_vocab * vocab,
993
+ const char * text,
994
+ int32_t text_len,
995
+ llama_token * tokens,
996
+ int32_t n_tokens_max,
997
+ bool add_special,
998
+ bool parse_special);
999
+
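// Editor's sketch (not part of the diff): two-pass tokenization - per the
// contract above, a negative return value is minus the required token count.
#include "llama.h"
#include <stdlib.h>
#include <string.h>

llama_token * tokenize(const struct llama_vocab * vocab, const char * text, int32_t * n_out) {
    const int32_t len = (int32_t) strlen(text);
    // first pass with zero capacity to learn the required size
    int32_t n = llama_tokenize(vocab, text, len, NULL, 0,
                               /*add_special=*/true, /*parse_special=*/false);
    if (n >= 0) { *n_out = 0; return NULL; } // empty input tokenizes to nothing
    n = -n; // required capacity
    llama_token * toks = malloc(sizeof(llama_token) * (size_t) n);
    *n_out = llama_tokenize(vocab, text, len, toks, n, true, false);
    return toks; // caller frees
}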
+ // Token Id -> Piece.
+ // Uses the vocabulary in the provided context.
+ // Does not write null terminator to the buffer.
+ // User can skip up to 'lstrip' leading spaces before copying (useful when encoding/decoding multiple tokens with 'add_space_prefix')
+ // @param special If true, special tokens are rendered in the output.
+ LLAMA_API int32_t llama_token_to_piece(
+           const struct llama_vocab * vocab,
+                        llama_token   token,
+                               char * buf,
+                            int32_t   length,
+                            int32_t   lstrip,
+                               bool   special);
+
+ /// @details Convert the provided tokens into text (inverse of llama_tokenize()).
+ /// @param text The char pointer must be large enough to hold the resulting text.
+ /// @return Returns the number of chars/bytes on success, no more than text_len_max.
+ /// @return Returns a negative number on failure - the number of chars/bytes that would have been returned.
+ /// @param remove_special Allows removing BOS and EOS tokens if the model is configured to do so.
+ /// @param unparse_special If true, special tokens are rendered in the output.
+ LLAMA_API int32_t llama_detokenize(
+     const struct llama_vocab * vocab,
+            const llama_token * tokens,
+                      int32_t   n_tokens,
+                         char * text,
+                      int32_t   text_len_max,
+                         bool   remove_special,
+                         bool   unparse_special);
+
+ //
+ // Chat templates
+ //
+
+ /// Apply chat template. Inspired by hf apply_chat_template() in Python.
+ /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model"
+ /// NOTE: This function does not use a jinja parser. It only supports a pre-defined list of templates. See more: https://github.com/ggml-org/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
+ /// @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead.
+ /// @param chat Pointer to a list of multiple llama_chat_message
+ /// @param n_msg Number of llama_chat_message in this chat
+ /// @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message.
+ /// @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages)
+ /// @param length The size of the allocated buffer
+ /// @return The total number of bytes of the formatted prompt. If it is larger than the size of the buffer, you may need to re-alloc it and then re-apply the template.
+ LLAMA_API int32_t llama_chat_apply_template(
+                         const char * tmpl,
+    const struct llama_chat_message * chat,
+                             size_t   n_msg,
+                               bool   add_ass,
+                               char * buf,
+                            int32_t   length);
+
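// Editor's sketch (not part of the diff): applying a chat template with the
// grow-and-retry pattern implied by the return-value contract above; tmpl is
// typically the string returned by llama_model_chat_template(). Error handling
// for realloc is elided for brevity.
#include "llama.h"
#include <stdlib.h>

char * format_chat(const char * tmpl, const struct llama_chat_message * msgs, size_t n_msg) {
    int32_t cap = 1024;
    char *  buf = malloc((size_t) cap + 1);
    int32_t n   = llama_chat_apply_template(tmpl, msgs, n_msg, /*add_ass=*/true, buf, cap);
    if (n > cap) {                 // buffer too small: grow and re-apply
        cap = n;
        buf = realloc(buf, (size_t) cap + 1);
        n   = llama_chat_apply_template(tmpl, msgs, n_msg, true, buf, cap);
    }
    if (n < 0) { free(buf); return NULL; }
    buf[n] = '\0';                 // the formatted prompt is not null-terminated
    return buf;                    // caller frees
}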
+ // Get list of built-in chat templates
+ LLAMA_API int32_t llama_chat_builtin_templates(const char ** output, size_t len);
+
+ //
+ // Sampling API
+ //
+ // Sample usage:
+ //
+ //    // prepare the sampling chain at the start
+ //    auto sparams = llama_sampler_chain_default_params();
+ //
+ //    llama_sampler * smpl = llama_sampler_chain_init(sparams);
+ //
+ //    llama_sampler_chain_add(smpl, llama_sampler_init_top_k(50));
+ //    llama_sampler_chain_add(smpl, llama_sampler_init_top_p(0.9, 1));
+ //    llama_sampler_chain_add(smpl, llama_sampler_init_temp (0.8));
+ //
+ //    // typically, the chain should end with a sampler such as "greedy", "dist" or "mirostat"
+ //    // this sampler will be responsible for selecting the actual token
+ //    llama_sampler_chain_add(smpl, llama_sampler_init_dist(seed));
+ //
+ //    ...
+ //
+ //    // decoding loop:
+ //    while (...) {
+ //        ...
+ //
+ //        llama_decode(ctx, batch);
+ //
+ //        // sample from the logits of the last token in the batch
+ //        const llama_token id = llama_sampler_sample(smpl, ctx, -1);
+ //
+ //        // accepting the token updates the internal state of certain samplers (e.g. grammar, repetition, etc.)
+ //        llama_sampler_accept(smpl, id);
+ //        ...
+ //    }
+ //
+ //    llama_sampler_free(smpl);
+ //
+ // TODO: In the future, llama_sampler will be utilized to offload the sampling to the backends (e.g. GPU).
+ //
+
+ typedef void * llama_sampler_context_t;
+
+ // user code can implement the interface below in order to create custom llama_sampler
+ struct llama_sampler_i {
+     const char *           (*name)  (const struct llama_sampler * smpl);                                 // can be NULL
+     void                   (*accept)(      struct llama_sampler * smpl, llama_token token);              // can be NULL
+     void                   (*apply) (      struct llama_sampler * smpl, llama_token_data_array * cur_p); // required
+     void                   (*reset) (      struct llama_sampler * smpl);                                 // can be NULL
+     struct llama_sampler * (*clone) (const struct llama_sampler * smpl);                                 // can be NULL if ctx is NULL
+     void                   (*free)  (      struct llama_sampler * smpl);                                 // can be NULL if ctx is NULL
+
+     // TODO: API for internal libllama usage for appending the sampling to an existing ggml_cgraph
+     //void (*apply_ggml) (struct llama_sampler * smpl, ...);
+ };
+
+ struct llama_sampler {
+     const struct llama_sampler_i * iface;
+     llama_sampler_context_t        ctx;
+ };
+
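// Editor's sketch (not part of the diff): a minimal custom sampler implementing
// the llama_sampler_i interface above. It masks one banned token by setting its
// logit to -INFINITY; the global is illustrative - real code would carry state
// via smpl->ctx so that clone/free can be implemented.
#include "llama.h"
#include <math.h>

static llama_token g_banned = 0; // token id to suppress (illustrative)

static const char * ban_name(const struct llama_sampler * smpl) {
    (void) smpl; return "ban-token";
}

static void ban_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
    (void) smpl;
    for (size_t i = 0; i < cur_p->size; i++) {
        if (cur_p->data[i].id == g_banned) cur_p->data[i].logit = -INFINITY;
    }
}

static const struct llama_sampler_i ban_iface = {
    /*.name   =*/ ban_name,
    /*.accept =*/ NULL,
    /*.apply  =*/ ban_apply,
    /*.reset  =*/ NULL,
    /*.clone  =*/ NULL, // allowed: ctx is NULL
    /*.free   =*/ NULL,
};

// usage: llama_sampler_chain_add(chain, llama_sampler_init(&ban_iface, NULL));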
1112
+ // mirror of llama_sampler_i:
1113
+ LLAMA_API struct llama_sampler * llama_sampler_init (const struct llama_sampler_i * iface, llama_sampler_context_t ctx);
1114
+ LLAMA_API const char * llama_sampler_name (const struct llama_sampler * smpl);
1115
+ LLAMA_API void llama_sampler_accept( struct llama_sampler * smpl, llama_token token);
1116
+ LLAMA_API void llama_sampler_apply ( struct llama_sampler * smpl, llama_token_data_array * cur_p);
1117
+ LLAMA_API void llama_sampler_reset ( struct llama_sampler * smpl);
1118
+ LLAMA_API struct llama_sampler * llama_sampler_clone (const struct llama_sampler * smpl);
1119
+ // important: do not free if the sampler has been added to a llama_sampler_chain (via llama_sampler_chain_add)
1120
+ LLAMA_API void llama_sampler_free ( struct llama_sampler * smpl);
1121
+
1122
+ // llama_sampler_chain
1123
+ // a type of llama_sampler that can chain multiple samplers one after another
1124
+
1125
+ LLAMA_API struct llama_sampler * llama_sampler_chain_init(struct llama_sampler_chain_params params);
1126
+
1127
+ // important: takes ownership of the sampler object and will free it when llama_sampler_free is called
1128
+ LLAMA_API void llama_sampler_chain_add( struct llama_sampler * chain, struct llama_sampler * smpl);
1129
+ LLAMA_API struct llama_sampler * llama_sampler_chain_get(const struct llama_sampler * chain, int32_t i);
1130
+ LLAMA_API int llama_sampler_chain_n (const struct llama_sampler * chain);
1131
+
1132
+ // after removing a sampler, the chain will no longer own it, and it will not be freed when the chain is freed
1133
+ LLAMA_API struct llama_sampler * llama_sampler_chain_remove( struct llama_sampler * chain, int32_t i);
1134
+
1135
+ // available samplers:
1136
+
1137
+ LLAMA_API struct llama_sampler * llama_sampler_init_greedy(void);
1138
+ LLAMA_API struct llama_sampler * llama_sampler_init_dist (uint32_t seed);
1139
+
1140
+ /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits.
1141
+ /// NOTE: Avoid using on the full vocabulary as the sorting can become slow. For example, apply top-k or top-p sampling first.
1142
+ DEPRECATED(LLAMA_API struct llama_sampler * llama_sampler_init_softmax (void),
1143
+ "will be removed in the future (see https://github.com/ggml-org/llama.cpp/pull/9896#discussion_r1800920915)");
1144
+
1145
+ /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
+ /// Setting k <= 0 makes this a no-op
+ LLAMA_API struct llama_sampler * llama_sampler_init_top_k (int32_t k);
+
+ /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
+ LLAMA_API struct llama_sampler * llama_sampler_init_top_p (float p, size_t min_keep);
+
+ /// @details Minimum P sampling as described in https://github.com/ggml-org/llama.cpp/pull/3841
+ LLAMA_API struct llama_sampler * llama_sampler_init_min_p (float p, size_t min_keep);
+
+ /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
+ LLAMA_API struct llama_sampler * llama_sampler_init_typical (float p, size_t min_keep);
+
+ /// @details Updates the logits: l_i' = l_i/t. When t <= 0.0f, the maximum logit is kept at its original value and the rest are set to -inf
+ LLAMA_API struct llama_sampler * llama_sampler_init_temp (float t);
+
+ /// @details Dynamic temperature implementation (a.k.a. entropy) described in the paper https://arxiv.org/abs/2309.02772.
+ LLAMA_API struct llama_sampler * llama_sampler_init_temp_ext (float t, float delta, float exponent);
+
+ /// @details XTC sampler as described in https://github.com/oobabooga/text-generation-webui/pull/6335
+ LLAMA_API struct llama_sampler * llama_sampler_init_xtc (float p, float t, size_t min_keep, uint32_t seed);
+
+ /// @details Top n sigma sampling as described in academic paper "Top-nσ: Not All Logits Are You Need" https://arxiv.org/pdf/2411.07641
+ LLAMA_API struct llama_sampler * llama_sampler_init_top_n_sigma(float n);
+
+ /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
+ /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
+ /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
+ /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
+ /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
+ /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
+ LLAMA_API struct llama_sampler * llama_sampler_init_mirostat(
+ int32_t n_vocab,
+ uint32_t seed,
+ float tau,
+ float eta,
+ int32_t m);
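+
+ // Illustrative sketch: construct Mirostat 1.0 with values commonly used in
+ // llama.cpp examples (tau = 5.0, eta = 0.1, m = 100); n_vocab and seed are
+ // assumed to come from the caller.
+ //
+ // struct llama_sampler * mir = llama_sampler_init_mirostat(n_vocab, seed, 5.0f, 0.1f, 100);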
+
+ /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
+ /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
+ /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
+ /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
+ /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
+ LLAMA_API struct llama_sampler * llama_sampler_init_mirostat_v2(
+ uint32_t seed,
+ float tau,
+ float eta);
+
+ /// @details Initializes a GBNF grammar, see grammars/README.md for details.
+ /// @param vocab The vocabulary that this grammar will be used with.
+ /// @param grammar_str The production rules for the grammar, encoded as a string. Returns an empty grammar if the string is empty. Returns NULL if parsing of grammar_str fails.
+ /// @param grammar_root The name of the start symbol for the grammar.
+ LLAMA_API struct llama_sampler * llama_sampler_init_grammar(
+ const struct llama_vocab * vocab,
+ const char * grammar_str,
+ const char * grammar_root);
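+
+ // Illustrative sketch: constrain output to a yes/no answer with a tiny GBNF
+ // grammar; `vocab` is assumed to come from llama_model_get_vocab().
+ //
+ // const char * gbnf = "root ::= \"yes\" | \"no\"";
+ // struct llama_sampler * g = llama_sampler_init_grammar(vocab, gbnf, "root");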
+
+ DEPRECATED(LLAMA_API struct llama_sampler * llama_sampler_init_grammar_lazy(
+ const struct llama_vocab * vocab,
+ const char * grammar_str,
+ const char * grammar_root,
+ const char ** trigger_words,
+ size_t num_trigger_words,
+ const llama_token * trigger_tokens,
+ size_t num_trigger_tokens),
+ "use llama_sampler_init_grammar_lazy_patterns instead");
+
+ /// @details Lazy grammar sampler, introduced in https://github.com/ggml-org/llama.cpp/pull/9639
+ /// @param trigger_patterns A list of patterns that will trigger the grammar sampler. Each pattern is matched from the start of the generation output, and the grammar sampler is fed content starting from its first match group.
+ /// @param trigger_tokens A list of tokens that will trigger the grammar sampler. The grammar sampler is fed content starting from (and including) the trigger token.
+ LLAMA_API struct llama_sampler * llama_sampler_init_grammar_lazy_patterns(
+ const struct llama_vocab * vocab,
+ const char * grammar_str,
+ const char * grammar_root,
+ const char ** trigger_patterns,
+ size_t num_trigger_patterns,
+ const llama_token * trigger_tokens,
+ size_t num_trigger_tokens);
+
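+ // Illustrative sketch: apply the grammar only once the model starts a tool
+ // call. The pattern and the gbnf string from the example above are
+ // hypothetical.
+ //
+ // const char * patterns[] = { "<tool_call>[\\s\\S]*" };
+ // struct llama_sampler * lazy = llama_sampler_init_grammar_lazy_patterns(
+ // vocab, gbnf, "root",
+ // patterns, 1,
+ // /*trigger_tokens=*/ NULL, 0);
+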
+ /// NOTE: Avoid using on the full vocabulary as searching for repeated tokens can become slow. For example, apply top-k or top-p sampling first.
+ LLAMA_API struct llama_sampler * llama_sampler_init_penalties(
+ int32_t penalty_last_n, // last n tokens to penalize (0 = disable penalty, -1 = context size)
+ float penalty_repeat, // 1.0 = disabled
+ float penalty_freq, // 0.0 = disabled
+ float penalty_present); // 0.0 = disabled
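+
+ // Illustrative sketch: a mild repetition penalty over the last 64 tokens;
+ // the values are examples, not defaults.
+ //
+ // llama_sampler_chain_add(chain, llama_sampler_init_penalties(64, 1.1f, 0.0f, 0.0f));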
+
+ /// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting the Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982
+ LLAMA_API struct llama_sampler * llama_sampler_init_dry(
+ const struct llama_vocab * vocab,
+ int32_t n_ctx_train,
+ float dry_multiplier,
+ float dry_base,
+ int32_t dry_allowed_length,
+ int32_t dry_penalty_last_n,
+ const char ** seq_breakers,
+ size_t num_breakers);
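+
+ // Illustrative sketch: enable DRY with values often seen in frontends
+ // (multiplier 0.8, base 1.75, allowed length 2); the sequence breakers are
+ // example strings, and vocab/n_ctx_train come from the loaded model.
+ //
+ // const char * breakers[] = { "\n", ":", "\"", "*" };
+ // llama_sampler_chain_add(chain, llama_sampler_init_dry(
+ // vocab, n_ctx_train, 0.8f, 1.75f, 2, /*last_n=*/ -1, breakers, 4));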
+
+ LLAMA_API struct llama_sampler * llama_sampler_init_logit_bias(
+ int32_t n_vocab,
+ int32_t n_logit_bias,
+ const llama_logit_bias * logit_bias);
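+
+ // Illustrative sketch: forbid a single token by driving its logit to -inf;
+ // `bad_token` and `n_vocab` are assumed to come from the caller.
+ //
+ // llama_logit_bias bias[] = { { bad_token, -INFINITY } };
+ // llama_sampler_chain_add(chain, llama_sampler_init_logit_bias(n_vocab, 1, bias));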
+
+ // this sampler is meant to be used for fill-in-the-middle infilling
+ // it's supposed to be used after top_k + top_p sampling
+ //
+ // 1. if the sum of the EOG probs times the number of candidates is higher than the sum of the other probs -> pick EOG
+ // 2. combine probs of tokens that have the same prefix
+ //
+ // example:
+ //
+ // - before:
+ // "hel": 0.5
+ // "hell": 0.2
+ // "hello": 0.1
+ // "dummy": 0.1
+ //
+ // - after:
+ // "hel": 0.8
+ // "dummy": 0.1
+ //
+ // 3. discard non-EOG tokens with low prob
+ // 4. if no tokens are left -> pick EOT
+ //
+ LLAMA_API struct llama_sampler * llama_sampler_init_infill(const struct llama_vocab * vocab);
+
+ // Returns the seed used by the sampler if applicable, LLAMA_DEFAULT_SEED otherwise
+ LLAMA_API uint32_t llama_sampler_get_seed(const struct llama_sampler * smpl);
+
+ /// @details Sample and accept a token from the idx-th output of the last evaluation
+ //
+ // Shorthand for:
+ // const auto * logits = llama_get_logits_ith(ctx, idx);
+ // llama_token_data_array cur_p = { ... init from logits ... };
+ // llama_sampler_apply(smpl, &cur_p);
+ // auto token = cur_p.data[cur_p.selected].id;
+ // llama_sampler_accept(smpl, token);
+ // return token;
+ // Returns the sampled token
+ LLAMA_API llama_token llama_sampler_sample(struct llama_sampler * smpl, struct llama_context * ctx, int32_t idx);
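+
+ // Illustrative sketch: a minimal generation loop built on
+ // llama_sampler_sample; `ctx`, `vocab` and `chain` are assumed to be set up
+ // already, and idx = -1 refers to the last output.
+ //
+ // for (;;) {
+ // llama_token tok = llama_sampler_sample(chain, ctx, -1);
+ // if (llama_vocab_is_eog(vocab, tok)) break;
+ // llama_decode(ctx, llama_batch_get_one(&tok, 1));
+ // }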
+
+ // TODO: extend in the future
+ //LLAMA_API void llama_decode_with_sampler(struct llama_context * ctx, struct llama_sampler * smpl, struct llama_batch batch, ...);
+
+ //
+ // Model split
+ //
+
+ /// @details Build a split GGUF final path for this chunk.
+ /// llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf"
+ // Returns the split_path length.
+ LLAMA_API int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count);
+
+ /// @details Extract the path prefix from the split_path if and only if the split_no and split_count match.
+ /// llama_split_prefix(split_prefix, 64, "/models/ggml-model-q4_0-00002-of-00004.gguf", 2, 4) => split_prefix = "/models/ggml-model-q4_0"
+ // Returns the split_prefix length.
+ LLAMA_API int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count);
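+
+ // Illustrative sketch, reusing the example values from the docs above:
+ //
+ // char split_path[512];
+ // llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4);
+ // // split_path == "/models/ggml-model-q4_0-00002-of-00004.gguf"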
+
+ // Print system information
+ LLAMA_API const char * llama_print_system_info(void);
+
+ // Set callback for all future logging events.
+ // If this is not called, or NULL is supplied, everything is output on stderr.
+ LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data);
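+
+ // Illustrative sketch: silence logs below warning level. The callback
+ // signature follows ggml_log_callback from ggml.h.
+ //
+ // static void my_log(enum ggml_log_level level, const char * text, void * user_data) {
+ // if (level >= GGML_LOG_LEVEL_WARN) {
+ // fputs(text, stderr);
+ // }
+ // }
+ //
+ // llama_log_set(my_log, NULL);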
+
+ //
+ // Performance utils
+ //
+ // NOTE: Used by llama.cpp examples, avoid using in third-party apps. Instead, do your own performance measurements.
+ //
+
+ struct llama_perf_context_data {
+ double t_start_ms;
+ double t_load_ms;
+ double t_p_eval_ms;
+ double t_eval_ms;
+
+ int32_t n_p_eval;
+ int32_t n_eval;
+ };
+
+ struct llama_perf_sampler_data {
+ double t_sample_ms;
+
+ int32_t n_sample;
+ };
+
+ LLAMA_API struct llama_perf_context_data llama_perf_context (const struct llama_context * ctx);
+ LLAMA_API void llama_perf_context_print(const struct llama_context * ctx);
+ LLAMA_API void llama_perf_context_reset( struct llama_context * ctx);
+
+ // NOTE: the following work only with samplers constructed via llama_sampler_chain_init
+ LLAMA_API struct llama_perf_sampler_data llama_perf_sampler (const struct llama_sampler * chain);
+ LLAMA_API void llama_perf_sampler_print(const struct llama_sampler * chain);
+ LLAMA_API void llama_perf_sampler_reset( struct llama_sampler * chain);
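+
+ // Illustrative sketch: read and print timings after a generation run;
+ // `ctx` and `chain` are assumed from the examples above.
+ //
+ // struct llama_perf_context_data pd = llama_perf_context(ctx);
+ // fprintf(stderr, "prompt: %d tokens in %.2f ms\n", pd.n_p_eval, pd.t_p_eval_ms);
+ // fprintf(stderr, "eval : %d tokens in %.2f ms\n", pd.n_eval, pd.t_eval_ms);
+ // llama_perf_sampler_print(chain); // chain must come from llama_sampler_chain_init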
+
+ //
+ // training
+ //
+
+ // function that returns whether or not a given tensor contains trainable parameters
+ typedef bool (*llama_opt_param_filter)(const struct ggml_tensor * tensor, void * userdata);
+
+ // always returns true
+ LLAMA_API bool llama_opt_param_filter_all(const struct ggml_tensor * tensor, void * userdata);
+
+ struct llama_opt_params {
+ uint32_t n_ctx_train; // assumed context size post training, use context size specified in llama_context if 0
+
+ llama_opt_param_filter param_filter; // callback for determining which tensors contain trainable parameters
+ void * param_filter_ud; // userdata for determining which tensors contain trainable parameters
+
+ ggml_opt_get_optimizer_params get_opt_pars; // callback for calculating optimizer parameters
+ void * get_opt_pars_ud; // userdata for calculating optimizer parameters
+ };
+
+ LLAMA_API void llama_opt_init(struct llama_context * lctx, struct llama_model * model, struct llama_opt_params lopt_params);
+
+ LLAMA_API void llama_opt_epoch(
+ struct llama_context * lctx,
+ ggml_opt_dataset_t dataset,
+ ggml_opt_result_t result_train,
+ ggml_opt_result_t result_eval,
+ int64_t idata_split,
+ ggml_opt_epoch_callback callback_train,
+ ggml_opt_epoch_callback callback_eval);
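+
+ // Illustrative sketch: fill llama_opt_params so that all tensors train and
+ // optimizer settings come from ggml's default callback
+ // (ggml_opt_get_default_optimizer_params, declared in ggml-opt.h).
+ //
+ // struct llama_opt_params op = {
+ // /*.n_ctx_train =*/ 0, // use the llama_context size
+ // /*.param_filter =*/ llama_opt_param_filter_all,
+ // /*.param_filter_ud =*/ NULL,
+ // /*.get_opt_pars =*/ ggml_opt_get_default_optimizer_params,
+ // /*.get_opt_pars_ud =*/ NULL,
+ // };
+ // llama_opt_init(lctx, model, op);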
+
+ #ifdef __cplusplus
+ }
+ #endif
+
+ #endif // LLAMA_H