whispercpp gem: changes from version 1.3.2 to 1.3.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (664)
  1. checksums.yaml +4 -4
  2. data/.gitignore +6 -3
  3. data/README.md +71 -14
  4. data/Rakefile +20 -7
  5. data/ext/.gitignore +4 -6
  6. data/ext/dependencies.rb +36 -24
  7. data/ext/extconf.rb +1 -1
  8. data/ext/options.rb +48 -184
  9. data/ext/ruby_whisper.c +18 -0
  10. data/ext/ruby_whisper_context.c +43 -12
  11. data/ext/ruby_whisper_model.c +1 -1
  12. data/ext/ruby_whisper_params.c +59 -27
  13. data/ext/ruby_whisper_segment.c +81 -4
  14. data/ext/ruby_whisper_transcribe.cpp +13 -7
  15. data/ext/ruby_whisper_vad_params.c +1 -1
  16. data/ext/sources/CMakeLists.txt +5 -1
  17. data/ext/sources/bindings/javascript/package.json +1 -1
  18. data/ext/sources/build-xcframework.sh +24 -0
  19. data/ext/sources/examples/CMakeLists.txt +1 -0
  20. data/ext/sources/examples/addon.node/__test__/whisper.spec.js +120 -24
  21. data/ext/sources/examples/addon.node/addon.cpp +154 -35
  22. data/ext/sources/examples/addon.node/index.js +10 -5
  23. data/ext/sources/examples/addon.node/vad-example.js +132 -0
  24. data/ext/sources/examples/bench/bench.cpp +29 -18
  25. data/ext/sources/examples/bench.wasm/index-tmpl.html +10 -9
  26. data/ext/sources/examples/cli/cli.cpp +7 -4
  27. data/ext/sources/examples/command/command.cpp +58 -32
  28. data/ext/sources/examples/command.wasm/index-tmpl.html +5 -4
  29. data/ext/sources/examples/common-ggml.cpp +2 -0
  30. data/ext/sources/examples/common-whisper.cpp +14 -7
  31. data/ext/sources/examples/lsp/lsp.cpp +21 -17
  32. data/ext/sources/examples/quantize/quantize.cpp +3 -0
  33. data/ext/sources/examples/server/CMakeLists.txt +3 -0
  34. data/ext/sources/examples/server/server.cpp +193 -35
  35. data/ext/sources/examples/server.py +6 -1
  36. data/ext/sources/examples/stream/stream.cpp +10 -2
  37. data/ext/sources/examples/stream.wasm/emscripten.cpp +6 -6
  38. data/ext/sources/examples/stream.wasm/index-tmpl.html +82 -5
  39. data/ext/sources/examples/talk-llama/CMakeLists.txt +3 -0
  40. data/ext/sources/examples/talk-llama/llama-adapter.cpp +101 -4
  41. data/ext/sources/examples/talk-llama/llama-adapter.h +6 -0
  42. data/ext/sources/examples/talk-llama/llama-arch.cpp +756 -15
  43. data/ext/sources/examples/talk-llama/llama-arch.h +85 -1
  44. data/ext/sources/examples/talk-llama/llama-batch.cpp +773 -272
  45. data/ext/sources/examples/talk-llama/llama-batch.h +126 -55
  46. data/ext/sources/examples/talk-llama/llama-chat.cpp +150 -13
  47. data/ext/sources/examples/talk-llama/llama-chat.h +8 -0
  48. data/ext/sources/examples/talk-llama/llama-context.cpp +814 -542
  49. data/ext/sources/examples/talk-llama/llama-context.h +68 -32
  50. data/ext/sources/examples/talk-llama/llama-cparams.cpp +1 -1
  51. data/ext/sources/examples/talk-llama/llama-cparams.h +4 -4
  52. data/ext/sources/examples/talk-llama/llama-graph.cpp +787 -440
  53. data/ext/sources/examples/talk-llama/llama-graph.h +333 -153
  54. data/ext/sources/examples/talk-llama/llama-hparams.cpp +128 -6
  55. data/ext/sources/examples/talk-llama/llama-hparams.h +80 -17
  56. data/ext/sources/examples/talk-llama/llama-impl.h +2 -0
  57. data/ext/sources/examples/talk-llama/llama-kv-cache-iswa.cpp +326 -0
  58. data/ext/sources/examples/talk-llama/llama-kv-cache-iswa.h +137 -0
  59. data/ext/sources/examples/talk-llama/llama-kv-cache.cpp +1248 -1967
  60. data/ext/sources/examples/talk-llama/llama-kv-cache.h +218 -345
  61. data/ext/sources/examples/talk-llama/llama-kv-cells.h +164 -52
  62. data/ext/sources/examples/talk-llama/llama-memory-hybrid.cpp +266 -0
  63. data/ext/sources/examples/talk-llama/llama-memory-hybrid.h +139 -0
  64. data/ext/sources/examples/talk-llama/llama-memory-recurrent.cpp +1154 -0
  65. data/ext/sources/examples/talk-llama/llama-memory-recurrent.h +182 -0
  66. data/ext/sources/examples/talk-llama/llama-memory.cpp +58 -0
  67. data/ext/sources/examples/talk-llama/llama-memory.h +94 -4
  68. data/ext/sources/examples/talk-llama/llama-mmap.cpp +1 -1
  69. data/ext/sources/examples/talk-llama/llama-model-loader.cpp +44 -17
  70. data/ext/sources/examples/talk-llama/llama-model-loader.h +3 -2
  71. data/ext/sources/examples/talk-llama/llama-model-saver.cpp +1 -0
  72. data/ext/sources/examples/talk-llama/llama-model.cpp +11377 -5248
  73. data/ext/sources/examples/talk-llama/llama-model.h +87 -9
  74. data/ext/sources/examples/talk-llama/llama-quant.cpp +137 -16
  75. data/ext/sources/examples/talk-llama/llama-sampling.cpp +226 -126
  76. data/ext/sources/examples/talk-llama/llama-vocab.cpp +502 -38
  77. data/ext/sources/examples/talk-llama/llama-vocab.h +46 -0
  78. data/ext/sources/examples/talk-llama/llama.cpp +76 -17
  79. data/ext/sources/examples/talk-llama/llama.h +176 -151
  80. data/ext/sources/examples/talk-llama/talk-llama.cpp +11 -6
  81. data/ext/sources/examples/talk-llama/unicode.cpp +212 -0
  82. data/ext/sources/examples/talk-llama/unicode.h +45 -0
  83. data/ext/sources/examples/vad-speech-segments/speech.cpp +6 -0
  84. data/ext/sources/examples/wchess/wchess.cmd/wchess.cmd.cpp +6 -2
  85. data/ext/sources/examples/whisper.wasm/index-tmpl.html +17 -16
  86. data/ext/sources/ggml/CMakeLists.txt +106 -33
  87. data/ext/sources/ggml/cmake/common.cmake +24 -0
  88. data/ext/sources/ggml/cmake/ggml-config.cmake.in +132 -93
  89. data/ext/sources/ggml/include/ggml-backend.h +18 -2
  90. data/ext/sources/ggml/include/ggml-cpu.h +2 -0
  91. data/ext/sources/ggml/include/ggml-metal.h +1 -6
  92. data/ext/sources/ggml/include/ggml-opt.h +25 -6
  93. data/ext/sources/ggml/include/ggml-webgpu.h +19 -0
  94. data/ext/sources/ggml/include/ggml-zdnn.h +17 -0
  95. data/ext/sources/ggml/include/ggml.h +365 -21
  96. data/ext/sources/ggml/src/CMakeLists.txt +98 -25
  97. data/ext/sources/ggml/src/ggml-alloc.c +265 -141
  98. data/ext/sources/ggml/src/ggml-backend-impl.h +4 -1
  99. data/ext/sources/ggml/src/ggml-backend-reg.cpp +35 -13
  100. data/ext/sources/ggml/src/ggml-backend.cpp +266 -60
  101. data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +4 -4
  102. data/ext/sources/ggml/src/ggml-blas/ggml-blas.cpp +5 -4
  103. data/ext/sources/ggml/src/ggml-cann/CMakeLists.txt +15 -0
  104. data/ext/sources/ggml/src/ggml-cann/acl_tensor.cpp +3 -1
  105. data/ext/sources/ggml/src/ggml-cann/aclnn_ops.cpp +903 -717
  106. data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +143 -25
  107. data/ext/sources/ggml/src/ggml-cann/common.h +149 -2
  108. data/ext/sources/ggml/src/ggml-cann/ggml-cann.cpp +521 -78
  109. data/ext/sources/ggml/src/ggml-common.h +21 -0
  110. data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +165 -50
  111. data/ext/sources/ggml/src/ggml-cpu/amx/amx.cpp +5 -3
  112. data/ext/sources/ggml/src/ggml-cpu/amx/mmq.cpp +11 -10
  113. data/ext/sources/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
  114. data/ext/sources/ggml/src/ggml-cpu/arch/arm/quants.c +3650 -0
  115. data/ext/sources/ggml/src/ggml-cpu/arch/arm/repack.cpp +1891 -0
  116. data/ext/sources/ggml/src/ggml-cpu/arch/loongarch/quants.c +2160 -0
  117. data/ext/sources/ggml/src/ggml-cpu/arch/powerpc/cpu-feats.cpp +82 -0
  118. data/ext/sources/ggml/src/ggml-cpu/arch/powerpc/quants.c +2305 -0
  119. data/ext/sources/ggml/src/ggml-cpu/arch/riscv/quants.c +1897 -0
  120. data/ext/sources/ggml/src/ggml-cpu/arch/riscv/repack.cpp +342 -0
  121. data/ext/sources/ggml/src/ggml-cpu/arch/s390/quants.c +1468 -0
  122. data/ext/sources/ggml/src/ggml-cpu/arch/wasm/quants.c +1221 -0
  123. data/ext/sources/ggml/src/ggml-cpu/arch/x86/quants.c +3820 -0
  124. data/ext/sources/ggml/src/ggml-cpu/arch/x86/repack.cpp +6307 -0
  125. data/ext/sources/ggml/src/ggml-cpu/arch-fallback.h +214 -0
  126. data/ext/sources/ggml/src/ggml-cpu/common.h +18 -3
  127. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-impl.h +23 -7
  128. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +179 -110
  129. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.cpp +44 -33
  130. data/ext/sources/ggml/src/ggml-cpu/{ggml-cpu-hbm.cpp → hbm.cpp} +1 -1
  131. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +152 -18
  132. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.h +7 -1
  133. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +228 -98
  134. data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +532 -1124
  135. data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.h +5 -0
  136. data/ext/sources/ggml/src/ggml-cpu/ops.cpp +3374 -2081
  137. data/ext/sources/ggml/src/ggml-cpu/ops.h +13 -8
  138. data/ext/sources/ggml/src/ggml-cpu/quants.c +1193 -0
  139. data/ext/sources/ggml/src/ggml-cpu/{ggml-cpu-quants.h → quants.h} +34 -0
  140. data/ext/sources/ggml/src/ggml-cpu/repack.cpp +1982 -0
  141. data/ext/sources/ggml/src/ggml-cpu/repack.h +120 -0
  142. data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +367 -46
  143. data/ext/sources/ggml/src/ggml-cpu/spacemit/ime.cpp +1024 -0
  144. data/ext/sources/ggml/src/ggml-cpu/spacemit/ime.h +13 -0
  145. data/ext/sources/ggml/src/ggml-cpu/spacemit/ime1_kernels.cpp +3196 -0
  146. data/ext/sources/ggml/src/ggml-cpu/spacemit/ime_kernels.h +26 -0
  147. data/ext/sources/ggml/src/ggml-cpu/{ggml-cpu-traits.cpp → traits.cpp} +3 -3
  148. data/ext/sources/ggml/src/ggml-cpu/{ggml-cpu-traits.h → traits.h} +1 -1
  149. data/ext/sources/ggml/src/ggml-cpu/vec.cpp +272 -35
  150. data/ext/sources/ggml/src/ggml-cpu/vec.h +794 -142
  151. data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +20 -16
  152. data/ext/sources/ggml/src/ggml-cuda/add-id.cu +58 -0
  153. data/ext/sources/ggml/src/ggml-cuda/add-id.cuh +3 -0
  154. data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +330 -191
  155. data/ext/sources/ggml/src/ggml-cuda/binbcast.cuh +2 -0
  156. data/ext/sources/ggml/src/ggml-cuda/common.cuh +291 -81
  157. data/ext/sources/ggml/src/ggml-cuda/conv-transpose-1d.cu +1 -4
  158. data/ext/sources/ggml/src/ggml-cuda/conv2d-dw.cu +161 -0
  159. data/ext/sources/ggml/src/ggml-cuda/conv2d-dw.cuh +5 -0
  160. data/ext/sources/ggml/src/ggml-cuda/conv2d-transpose.cu +91 -0
  161. data/ext/sources/ggml/src/ggml-cuda/conv2d-transpose.cuh +4 -0
  162. data/ext/sources/ggml/src/ggml-cuda/conv2d.cu +166 -0
  163. data/ext/sources/ggml/src/ggml-cuda/conv2d.cuh +5 -0
  164. data/ext/sources/ggml/src/ggml-cuda/convert.cu +117 -22
  165. data/ext/sources/ggml/src/ggml-cuda/convert.cuh +20 -0
  166. data/ext/sources/ggml/src/ggml-cuda/cpy-utils.cuh +217 -0
  167. data/ext/sources/ggml/src/ggml-cuda/cpy.cu +64 -307
  168. data/ext/sources/ggml/src/ggml-cuda/cross-entropy-loss.cu +2 -14
  169. data/ext/sources/ggml/src/ggml-cuda/dequantize.cuh +14 -40
  170. data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +499 -368
  171. data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +142 -93
  172. data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cu +755 -0
  173. data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cuh +3 -0
  174. data/ext/sources/ggml/src/ggml-cuda/fattn-vec.cuh +593 -0
  175. data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +90 -50
  176. data/ext/sources/ggml/src/ggml-cuda/fattn.cu +185 -198
  177. data/ext/sources/ggml/src/ggml-cuda/fattn.cuh +2 -0
  178. data/ext/sources/ggml/src/ggml-cuda/getrows.cu +50 -39
  179. data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +636 -222
  180. data/ext/sources/ggml/src/ggml-cuda/im2col.cu +196 -35
  181. data/ext/sources/ggml/src/ggml-cuda/im2col.cuh +1 -0
  182. data/ext/sources/ggml/src/ggml-cuda/mean.cu +73 -0
  183. data/ext/sources/ggml/src/ggml-cuda/mean.cuh +3 -0
  184. data/ext/sources/ggml/src/ggml-cuda/mma.cuh +198 -45
  185. data/ext/sources/ggml/src/ggml-cuda/mmf.cu +123 -0
  186. data/ext/sources/ggml/src/ggml-cuda/mmf.cuh +496 -0
  187. data/ext/sources/ggml/src/ggml-cuda/mmq.cu +206 -57
  188. data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +1262 -721
  189. data/ext/sources/ggml/src/ggml-cuda/mmvf.cu +506 -0
  190. data/ext/sources/ggml/src/ggml-cuda/{mmv.cuh → mmvf.cuh} +4 -5
  191. data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +64 -73
  192. data/ext/sources/ggml/src/ggml-cuda/norm.cu +284 -12
  193. data/ext/sources/ggml/src/ggml-cuda/norm.cuh +7 -0
  194. data/ext/sources/ggml/src/ggml-cuda/opt-step-sgd.cu +49 -0
  195. data/ext/sources/ggml/src/ggml-cuda/opt-step-sgd.cuh +5 -0
  196. data/ext/sources/ggml/src/ggml-cuda/pad.cu +46 -23
  197. data/ext/sources/ggml/src/ggml-cuda/pad_reflect_1d.cu +91 -0
  198. data/ext/sources/ggml/src/ggml-cuda/pad_reflect_1d.cuh +5 -0
  199. data/ext/sources/ggml/src/ggml-cuda/quantize.cu +12 -10
  200. data/ext/sources/ggml/src/ggml-cuda/reduce_rows.cuh +53 -0
  201. data/ext/sources/ggml/src/ggml-cuda/roll.cu +67 -0
  202. data/ext/sources/ggml/src/ggml-cuda/roll.cuh +5 -0
  203. data/ext/sources/ggml/src/ggml-cuda/rope.cu +21 -27
  204. data/ext/sources/ggml/src/ggml-cuda/scale.cu +14 -11
  205. data/ext/sources/ggml/src/ggml-cuda/set-rows.cu +276 -0
  206. data/ext/sources/ggml/src/ggml-cuda/set-rows.cuh +7 -0
  207. data/ext/sources/ggml/src/ggml-cuda/softcap.cu +34 -0
  208. data/ext/sources/ggml/src/ggml-cuda/softcap.cuh +5 -0
  209. data/ext/sources/ggml/src/ggml-cuda/softmax.cu +126 -59
  210. data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +10 -2
  211. data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cu +322 -98
  212. data/ext/sources/ggml/src/ggml-cuda/sum.cu +6 -10
  213. data/ext/sources/ggml/src/ggml-cuda/sumrows.cu +23 -19
  214. data/ext/sources/ggml/src/ggml-cuda/sumrows.cuh +0 -1
  215. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-f16.cu +7 -0
  216. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q4_0.cu +7 -0
  217. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q4_1.cu +7 -0
  218. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q5_0.cu +7 -0
  219. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q5_1.cu +7 -0
  220. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q8_0.cu +7 -0
  221. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-f16.cu +7 -0
  222. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q4_0.cu +7 -0
  223. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q4_1.cu +7 -0
  224. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q5_0.cu +7 -0
  225. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q5_1.cu +7 -0
  226. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q8_0.cu +7 -0
  227. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-f16.cu +7 -0
  228. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q4_0.cu +7 -0
  229. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q4_1.cu +7 -0
  230. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q5_0.cu +7 -0
  231. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q5_1.cu +7 -0
  232. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q8_0.cu +7 -0
  233. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-f16.cu +7 -0
  234. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q4_0.cu +7 -0
  235. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q4_1.cu +7 -0
  236. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q5_0.cu +7 -0
  237. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q5_1.cu +7 -0
  238. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q8_0.cu +7 -0
  239. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-f16.cu +7 -0
  240. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q4_0.cu +7 -0
  241. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q4_1.cu +7 -0
  242. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q5_0.cu +7 -0
  243. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q5_1.cu +7 -0
  244. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q8_0.cu +7 -0
  245. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-f16.cu +7 -0
  246. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q4_0.cu +7 -0
  247. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q4_1.cu +7 -0
  248. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q5_0.cu +7 -0
  249. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q5_1.cu +7 -0
  250. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q8_0.cu +7 -0
  251. data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +21 -18
  252. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_1.cu +5 -0
  253. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_10.cu +5 -0
  254. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_11.cu +5 -0
  255. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_12.cu +5 -0
  256. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_13.cu +5 -0
  257. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_14.cu +5 -0
  258. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_15.cu +5 -0
  259. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_16.cu +5 -0
  260. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_2.cu +5 -0
  261. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_3.cu +5 -0
  262. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_4.cu +5 -0
  263. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_5.cu +5 -0
  264. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_6.cu +5 -0
  265. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_7.cu +5 -0
  266. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_8.cu +5 -0
  267. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_9.cu +5 -0
  268. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-mxfp4.cu +5 -0
  269. data/ext/sources/ggml/src/ggml-cuda/topk-moe.cu +259 -0
  270. data/ext/sources/ggml/src/ggml-cuda/topk-moe.cuh +14 -0
  271. data/ext/sources/ggml/src/ggml-cuda/tsembd.cu +3 -3
  272. data/ext/sources/ggml/src/ggml-cuda/unary.cu +179 -0
  273. data/ext/sources/ggml/src/ggml-cuda/unary.cuh +15 -0
  274. data/ext/sources/ggml/src/ggml-cuda/upscale.cu +92 -6
  275. data/ext/sources/ggml/src/ggml-cuda/vecdotq.cuh +110 -22
  276. data/ext/sources/ggml/src/ggml-cuda/vendors/cuda.h +4 -0
  277. data/ext/sources/ggml/src/ggml-cuda/vendors/hip.h +58 -36
  278. data/ext/sources/ggml/src/ggml-cuda/vendors/musa.h +4 -3
  279. data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +14 -2
  280. data/ext/sources/ggml/src/ggml-impl.h +229 -175
  281. data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +21 -17
  282. data/ext/sources/ggml/src/ggml-metal/ggml-metal-common.cpp +446 -0
  283. data/ext/sources/ggml/src/ggml-metal/ggml-metal-common.h +52 -0
  284. data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.h +33 -0
  285. data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.m +600 -0
  286. data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.cpp +1376 -0
  287. data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.h +226 -0
  288. data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.m +1308 -0
  289. data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +163 -63
  290. data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.cpp +3158 -0
  291. data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.h +82 -0
  292. data/ext/sources/ggml/src/ggml-metal/ggml-metal.cpp +718 -0
  293. data/ext/sources/ggml/src/ggml-metal/ggml-metal.metal +3208 -1575
  294. data/ext/sources/ggml/src/ggml-musa/CMakeLists.txt +18 -8
  295. data/ext/sources/ggml/src/ggml-musa/mudnn.cuh +2 -2
  296. data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +32 -0
  297. data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +4430 -792
  298. data/ext/sources/ggml/src/ggml-opencl/kernels/add.cl +107 -0
  299. data/ext/sources/ggml/src/ggml-opencl/kernels/add_id.cl +42 -0
  300. data/ext/sources/ggml/src/ggml-opencl/kernels/argsort.cl +86 -0
  301. data/ext/sources/ggml/src/ggml-opencl/kernels/concat.cl +109 -0
  302. data/ext/sources/ggml/src/ggml-opencl/kernels/conv2d.cl +185 -0
  303. data/ext/sources/ggml/src/ggml-opencl/kernels/conv2d_f16_f32.cl +176 -0
  304. data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +84 -0
  305. data/ext/sources/ggml/src/ggml-opencl/kernels/div.cl +138 -0
  306. data/ext/sources/ggml/src/ggml-opencl/kernels/flash_attn_f16.cl +370 -0
  307. data/ext/sources/ggml/src/ggml-opencl/kernels/flash_attn_f32.cl +370 -0
  308. data/ext/sources/ggml/src/ggml-opencl/kernels/flash_attn_f32_f16.cl +373 -0
  309. data/ext/sources/ggml/src/ggml-opencl/kernels/gelu.cl +27 -0
  310. data/ext/sources/ggml/src/ggml-opencl/kernels/glu.cl +378 -0
  311. data/ext/sources/ggml/src/ggml-opencl/kernels/group_norm.cl +121 -0
  312. data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f16.cl +1 -1
  313. data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f32.cl +1 -1
  314. data/ext/sources/ggml/src/ggml-opencl/kernels/mul.cl +73 -0
  315. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mat_f16_f32.cl +130 -0
  316. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_f16_f32_l4_lm.cl +132 -0
  317. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_f32_f32_l4_lm.cl +133 -0
  318. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_mxfp4_f32.cl +189 -0
  319. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_mxfp4_f32_flat.cl +176 -0
  320. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_q4_0_f32_8x_flat.cl +283 -0
  321. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_q8_0_f32.cl +140 -0
  322. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_q8_0_f32_flat.cl +222 -0
  323. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_mxfp4_f32.cl +144 -0
  324. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_mxfp4_f32_flat.cl +167 -0
  325. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q8_0_f32.cl +125 -0
  326. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q8_0_f32_flat.cl +202 -0
  327. data/ext/sources/ggml/src/ggml-opencl/kernels/norm.cl +80 -0
  328. data/ext/sources/ggml/src/ggml-opencl/kernels/pad.cl +30 -0
  329. data/ext/sources/ggml/src/ggml-opencl/kernels/repeat.cl +39 -0
  330. data/ext/sources/ggml/src/ggml-opencl/kernels/rms_norm.cl +79 -0
  331. data/ext/sources/ggml/src/ggml-opencl/kernels/scale.cl +3 -2
  332. data/ext/sources/ggml/src/ggml-opencl/kernels/set_rows.cl +189 -0
  333. data/ext/sources/ggml/src/ggml-opencl/kernels/sigmoid.cl +29 -0
  334. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f16.cl +34 -13
  335. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f32.cl +34 -13
  336. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f16.cl +34 -13
  337. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f32.cl +34 -13
  338. data/ext/sources/ggml/src/ggml-opencl/kernels/sub.cl +138 -0
  339. data/ext/sources/ggml/src/ggml-opencl/kernels/sum_rows.cl +39 -0
  340. data/ext/sources/ggml/src/ggml-opencl/kernels/tanh.cl +63 -0
  341. data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +20 -0
  342. data/ext/sources/ggml/src/ggml-opencl/kernels/tsembd.cl +48 -0
  343. data/ext/sources/ggml/src/ggml-opencl/kernels/upscale.cl +120 -0
  344. data/ext/sources/ggml/src/ggml-opt.cpp +97 -41
  345. data/ext/sources/ggml/src/ggml-quants.c +117 -24
  346. data/ext/sources/ggml/src/ggml-quants.h +6 -0
  347. data/ext/sources/ggml/src/ggml-rpc/ggml-rpc.cpp +85 -62
  348. data/ext/sources/ggml/src/ggml-sycl/CMakeLists.txt +3 -3
  349. data/ext/sources/ggml/src/ggml-sycl/backend.hpp +2 -0
  350. data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +9 -0
  351. data/ext/sources/ggml/src/ggml-sycl/binbcast.hpp +6 -0
  352. data/ext/sources/ggml/src/ggml-sycl/common.hpp +20 -48
  353. data/ext/sources/ggml/src/ggml-sycl/concat.cpp +13 -17
  354. data/ext/sources/ggml/src/ggml-sycl/convert.cpp +21 -2
  355. data/ext/sources/ggml/src/ggml-sycl/cpy.cpp +116 -211
  356. data/ext/sources/ggml/src/ggml-sycl/cpy.hpp +213 -1
  357. data/ext/sources/ggml/src/ggml-sycl/dequantize.hpp +32 -0
  358. data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +700 -1041
  359. data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +20 -9
  360. data/ext/sources/ggml/src/ggml-sycl/gemm.hpp +17 -26
  361. data/ext/sources/ggml/src/ggml-sycl/getrows.cpp +2 -96
  362. data/ext/sources/ggml/src/ggml-sycl/ggml-sycl.cpp +393 -250
  363. data/ext/sources/ggml/src/ggml-sycl/im2col.cpp +1 -1
  364. data/ext/sources/ggml/src/ggml-sycl/mmvq.cpp +32 -8
  365. data/ext/sources/ggml/src/ggml-sycl/quantize.hpp +133 -0
  366. data/ext/sources/ggml/src/ggml-sycl/quants.hpp +38 -11
  367. data/ext/sources/ggml/src/ggml-sycl/rope.cpp +125 -21
  368. data/ext/sources/ggml/src/ggml-sycl/set_rows.cpp +234 -0
  369. data/ext/sources/ggml/src/ggml-sycl/set_rows.hpp +8 -0
  370. data/ext/sources/ggml/src/ggml-sycl/sycl_hw.cpp +3 -1
  371. data/ext/sources/ggml/src/ggml-sycl/sycl_hw.hpp +3 -0
  372. data/ext/sources/ggml/src/ggml-sycl/tsembd.cpp +4 -3
  373. data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +105 -17
  374. data/ext/sources/ggml/src/ggml-vulkan/CMakeLists.txt +36 -32
  375. data/ext/sources/ggml/src/ggml-vulkan/ggml-vulkan.cpp +4198 -1145
  376. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +4 -12
  377. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add.comp +41 -1
  378. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add_id.comp +42 -0
  379. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argmax.comp +13 -4
  380. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp +39 -29
  381. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp +349 -0
  382. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv_transpose_1d.comp +98 -0
  383. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +2 -2
  384. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +66 -12
  385. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp +154 -0
  386. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +21 -0
  387. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp +1 -1
  388. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp +2 -1
  389. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp +6 -5
  390. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp +4 -2
  391. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_mxfp4.comp +32 -0
  392. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp +1 -1
  393. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp +1 -1
  394. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +1 -1
  395. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +1 -1
  396. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp +1 -1
  397. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/exp.comp +21 -0
  398. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +69 -24
  399. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.comp +60 -20
  400. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +98 -42
  401. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +64 -27
  402. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +74 -13
  403. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu.comp +13 -0
  404. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu_erf.comp +27 -0
  405. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu_quick.comp +11 -0
  406. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu_erf.comp +39 -0
  407. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp +4 -17
  408. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp +19 -10
  409. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +25 -15
  410. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/glu_head.comp +19 -0
  411. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/glu_main.comp +29 -0
  412. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/hardsigmoid.comp +22 -0
  413. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/hardswish.comp +22 -0
  414. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +18 -14
  415. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col_3d.comp +126 -0
  416. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp +65 -1
  417. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +11 -7
  418. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vecq.comp +140 -0
  419. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +144 -531
  420. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +206 -38
  421. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_funcs.comp +556 -0
  422. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +12 -5
  423. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.comp +15 -9
  424. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/multi_add.comp +111 -0
  425. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_sgd.comp +22 -0
  426. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +24 -3
  427. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/quantize_q8_1.comp +53 -3
  428. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/reglu.comp +9 -0
  429. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +64 -11
  430. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_partials.comp +65 -0
  431. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/roll.comp +46 -0
  432. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp +1 -4
  433. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +7 -9
  434. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +7 -9
  435. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +7 -9
  436. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rte.comp +5 -0
  437. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +1 -1
  438. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +29 -7
  439. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_back.comp +4 -0
  440. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sqrt.comp +17 -0
  441. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp +38 -5
  442. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/swiglu.comp +9 -0
  443. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/swiglu_oai.comp +14 -0
  444. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp +4 -3
  445. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/types.comp +101 -9
  446. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +69 -5
  447. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/utils.comp +25 -0
  448. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +338 -71
  449. data/ext/sources/ggml/src/ggml-webgpu/CMakeLists.txt +54 -0
  450. data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu.cpp +1558 -0
  451. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/add.tmpl.wgsl +44 -0
  452. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/add_in_place.tmpl.wgsl +41 -0
  453. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/binary_head.tmpl +45 -0
  454. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/common_decls.tmpl +930 -0
  455. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cpy.wgsl +60 -0
  456. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/embed_wgsl.py +124 -0
  457. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/get_rows.tmpl.wgsl +874 -0
  458. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/memset.wgsl +40 -0
  459. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul.tmpl.wgsl +44 -0
  460. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_in_place.tmpl.wgsl +41 -0
  461. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat.tmpl.wgsl +907 -0
  462. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm.wgsl +57 -0
  463. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm_in_place.wgsl +48 -0
  464. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/set_rows.wgsl +81 -0
  465. data/ext/sources/ggml/src/ggml-zdnn/CMakeLists.txt +36 -0
  466. data/ext/sources/ggml/src/ggml-zdnn/common.hpp +59 -0
  467. data/ext/sources/ggml/src/ggml-zdnn/ggml-zdnn.cpp +628 -0
  468. data/ext/sources/ggml/src/ggml-zdnn/mmf.cpp +80 -0
  469. data/ext/sources/ggml/src/ggml-zdnn/mmf.hpp +12 -0
  470. data/ext/sources/ggml/src/ggml-zdnn/utils.cpp +79 -0
  471. data/ext/sources/ggml/src/ggml-zdnn/utils.hpp +19 -0
  472. data/ext/sources/ggml/src/ggml.c +802 -142
  473. data/ext/sources/ggml/src/ggml.cpp +26 -0
  474. data/ext/sources/ggml/src/gguf.cpp +32 -4
  475. data/ext/sources/include/whisper.h +2 -0
  476. data/ext/sources/src/CMakeLists.txt +2 -0
  477. data/ext/sources/src/coreml/whisper-compat.h +10 -0
  478. data/ext/sources/src/coreml/whisper-compat.m +35 -0
  479. data/ext/sources/src/coreml/whisper-decoder-impl.m +1 -0
  480. data/ext/sources/src/coreml/whisper-encoder-impl.m +1 -0
  481. data/ext/sources/src/whisper.cpp +241 -215
  482. data/ext/sources/tests/CMakeLists.txt +8 -1
  483. data/ext/sources/tests/test-vad-full.cpp +3 -3
  484. data/ext/sources/tests/test-vad.cpp +2 -2
  485. data/extsources.rb +15 -9
  486. data/lib/whisper/context.rb +15 -0
  487. data/lib/whisper/model/uri.rb +57 -2
  488. data/lib/whisper/segment.rb +58 -0
  489. data/sig/whisper.rbs +75 -38
  490. data/{tests → test}/helper.rb +1 -12
  491. data/{tests → test}/test_model.rb +9 -0
  492. data/test/test_package.rb +51 -0
  493. data/{tests → test}/test_params.rb +8 -0
  494. data/test/test_segment.rb +146 -0
  495. data/{tests → test}/test_whisper.rb +70 -0
  496. data/whispercpp.gemspec +2 -3
  497. metadata +246 -191
  498. data/ext/sources/.dockerignore +0 -3
  499. data/ext/sources/.github/workflows/bindings-ruby.yml +0 -21
  500. data/ext/sources/ci/run.sh +0 -336
  501. data/ext/sources/close-issue.yml +0 -28
  502. data/ext/sources/ggml/include/ggml-kompute.h +0 -50
  503. data/ext/sources/ggml/src/ggml-amx/CMakeLists.txt +0 -107
  504. data/ext/sources/ggml/src/ggml-amx/common.h +0 -94
  505. data/ext/sources/ggml/src/ggml-amx/ggml-amx.cpp +0 -446
  506. data/ext/sources/ggml/src/ggml-amx/mmq.cpp +0 -2510
  507. data/ext/sources/ggml/src/ggml-amx/mmq.h +0 -17
  508. data/ext/sources/ggml/src/ggml-cann/kernels/CMakeLists.txt +0 -30
  509. data/ext/sources/ggml/src/ggml-cann/kernels/ascendc_kernels.h +0 -19
  510. data/ext/sources/ggml/src/ggml-cann/kernels/dup.cpp +0 -234
  511. data/ext/sources/ggml/src/ggml-cann/kernels/get_row_f16.cpp +0 -197
  512. data/ext/sources/ggml/src/ggml-cann/kernels/get_row_f32.cpp +0 -190
  513. data/ext/sources/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp +0 -204
  514. data/ext/sources/ggml/src/ggml-cann/kernels/get_row_q8_0.cpp +0 -191
  515. data/ext/sources/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +0 -218
  516. data/ext/sources/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +0 -216
  517. data/ext/sources/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +0 -295
  518. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +0 -6431
  519. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +0 -8
  520. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-quants.c +0 -13747
  521. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cu +0 -357
  522. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cuh +0 -3
  523. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cu +0 -365
  524. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cuh +0 -3
  525. data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f16.cuh +0 -482
  526. data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f32.cuh +0 -472
  527. data/ext/sources/ggml/src/ggml-cuda/mmv.cu +0 -336
  528. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu +0 -5
  529. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu +0 -5
  530. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu +0 -5
  531. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu +0 -5
  532. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu +0 -5
  533. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu +0 -5
  534. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu +0 -5
  535. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu +0 -5
  536. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu +0 -5
  537. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu +0 -5
  538. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu +0 -5
  539. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu +0 -5
  540. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu +0 -5
  541. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu +0 -5
  542. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu +0 -5
  543. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu +0 -5
  544. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu +0 -5
  545. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu +0 -5
  546. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu +0 -5
  547. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu +0 -5
  548. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu +0 -5
  549. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu +0 -5
  550. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu +0 -5
  551. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu +0 -5
  552. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu +0 -5
  553. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu +0 -5
  554. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu +0 -5
  555. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu +0 -5
  556. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu +0 -5
  557. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu +0 -5
  558. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu +0 -5
  559. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu +0 -5
  560. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu +0 -5
  561. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu +0 -5
  562. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu +0 -5
  563. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu +0 -5
  564. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu +0 -5
  565. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu +0 -5
  566. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu +0 -5
  567. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu +0 -5
  568. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu +0 -5
  569. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu +0 -5
  570. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu +0 -5
  571. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu +0 -5
  572. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu +0 -5
  573. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu +0 -5
  574. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu +0 -5
  575. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu +0 -5
  576. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu +0 -5
  577. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu +0 -5
  578. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu +0 -5
  579. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu +0 -5
  580. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu +0 -5
  581. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu +0 -5
  582. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu +0 -5
  583. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu +0 -5
  584. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu +0 -5
  585. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu +0 -5
  586. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu +0 -5
  587. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu +0 -5
  588. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu +0 -5
  589. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu +0 -5
  590. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu +0 -5
  591. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu +0 -5
  592. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu +0 -5
  593. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu +0 -5
  594. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu +0 -5
  595. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu +0 -5
  596. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu +0 -5
  597. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu +0 -5
  598. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu +0 -5
  599. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu +0 -5
  600. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu +0 -5
  601. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu +0 -5
  602. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu +0 -5
  603. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu +0 -5
  604. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu +0 -5
  605. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu +0 -5
  606. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu +0 -5
  607. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu +0 -5
  608. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu +0 -5
  609. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu +0 -5
  610. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu +0 -5
  611. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu +0 -5
  612. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu +0 -5
  613. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu +0 -5
  614. data/ext/sources/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
  615. data/ext/sources/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
  616. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/common.comp +0 -112
  617. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_add.comp +0 -58
  618. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_addrow.comp +0 -25
  619. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f16.comp +0 -52
  620. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f32.comp +0 -52
  621. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f16.comp +0 -52
  622. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f32.comp +0 -52
  623. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_diagmask.comp +0 -30
  624. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_gelu.comp +0 -22
  625. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows.comp +0 -17
  626. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f16.comp +0 -31
  627. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f32.comp +0 -31
  628. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_0.comp +0 -38
  629. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_1.comp +0 -39
  630. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q6_k.comp +0 -44
  631. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul.comp +0 -52
  632. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp +0 -69
  633. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_mat_f32.comp +0 -51
  634. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_0.comp +0 -33
  635. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_1.comp +0 -35
  636. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp +0 -140
  637. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp +0 -106
  638. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q8_0.comp +0 -73
  639. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp +0 -52
  640. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp +0 -28
  641. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_norm.comp +0 -84
  642. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_relu.comp +0 -21
  643. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rmsnorm.comp +0 -53
  644. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f16.comp +0 -52
  645. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f32.comp +0 -52
  646. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f16.comp +0 -52
  647. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f32.comp +0 -52
  648. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale.comp +0 -19
  649. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale_8.comp +0 -23
  650. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_silu.comp +0 -22
  651. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp +0 -72
  652. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp +0 -71
  653. data/ext/sources/ggml/src/ggml-metal/ggml-metal.m +0 -5998
  654. data/tests/test_package.rb +0 -46
  655. data/tests/test_segment.rb +0 -74
  656. /data/ext/sources/ggml/src/ggml-cpu/{cpu-feats-x86.cpp → arch/x86/cpu-feats.cpp} +0 -0
  657. /data/ext/sources/ggml/src/ggml-cpu/{ggml-cpu-hbm.h → hbm.h} +0 -0
  658. /data/{tests → test}/jfk_reader/.gitignore +0 -0
  659. /data/{tests → test}/jfk_reader/extconf.rb +0 -0
  660. /data/{tests → test}/jfk_reader/jfk_reader.c +0 -0
  661. /data/{tests → test}/test_callback.rb +0 -0
  662. /data/{tests → test}/test_error.rb +0 -0
  663. /data/{tests → test}/test_vad.rb +0 -0
  664. /data/{tests → test}/test_vad_params.rb +0 -0
@@ -1,313 +1,814 @@
1
1
  #include "llama-batch.h"
2
2
 
3
+ #include "llama-impl.h"
4
+ #include "llama-vocab.h"
5
+ #include "llama-memory.h"
6
+
3
7
  #include <cassert>
4
8
  #include <cstring>
5
9
  #include <algorithm>
10
+ #include <sstream>
6
11
 
7
- llama_ubatch llama_sbatch::reserve_ubatch(size_t n_ubatch, bool has_embd) {
8
- // clear empty sequences
9
- // the previous ubatch is assumed to be gone,
10
- // so nothing should refer to values in these sequences anymore.
11
- for (size_t i = seq.size(); i-- > 0;) {
12
- if (seq[i].length == 0) {
13
- seq.pop_back();
14
- } else {
15
- break;
16
- }
12
+ llama_batch_allocr::llama_batch_allocr(uint32_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {
13
+ const char * LLAMA_BATCH_DEBUG = getenv("LLAMA_BATCH_DEBUG");
14
+ debug = LLAMA_BATCH_DEBUG ? atoi(LLAMA_BATCH_DEBUG) : 0;
15
+
16
+ seq_pos.resize(LLAMA_MAX_SEQ);
17
+ seq_cpl.resize(LLAMA_MAX_SEQ);
18
+ for (auto & cur : seq_cpl) {
19
+ cur.resize(LLAMA_MAX_SEQ);
17
20
  }
18
- ubatch_token.resize(!has_embd ? n_ubatch : 0);
19
- ubatch_embd.resize(has_embd ? n_embd * n_ubatch : 0);
20
- ubatch_pos.resize(n_ubatch);
21
- ubatch_n_seq_id.resize(n_ubatch);
22
- ubatch_seq_id.resize(n_ubatch);
23
- ubatch_output.resize(n_ubatch);
24
- llama_ubatch ubatch = {
25
- /*equal_seqs =*/ true,
26
- /*n_tokens =*/ 0,
27
- /*n_seq_tokens =*/ 0,
28
- /*n_seqs =*/ 0,
29
- /*token =*/ !has_embd ? ubatch_token.data() : nullptr,
30
- /*embd =*/ has_embd ? ubatch_embd.data() : nullptr,
31
- /*pos =*/ ubatch_pos.data(),
32
- /*n_seq_id =*/ ubatch_n_seq_id.data(),
33
- /*seq_id =*/ ubatch_seq_id.data(),
34
- /*output =*/ ubatch_output.data(),
35
- };
36
- return ubatch;
21
+
22
+ seq_idx.resize(LLAMA_MAX_SEQ, -1);
37
23
  }
38
24
 
39
- void llama_sbatch::add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length) {
40
- GGML_ASSERT(batch != nullptr);
41
- GGML_ASSERT(length <= seq.length);
42
- // Can only add sequences of equal lengths to a batch,
43
- // otherwise it isn't clear to which sequence a token belongs
44
- GGML_ASSERT(seq.n_seq_id == 0 || ubatch.n_seqs == 0 || length == (size_t) ubatch.n_tokens / ubatch.n_seqs);
45
- GGML_ASSERT((seq.n_seq_id != 0) == ubatch.equal_seqs);
46
- // NOTE: loops are separated for cache-friendliness
47
- if (batch->token) {
48
- if (ubatch.equal_seqs) {
49
- for (size_t i = 0; i < length; ++i) {
50
- ubatch.token[ubatch.n_tokens + i] = batch->token[ids[seq.offset + i]];
25
+ bool llama_batch_allocr::init(
26
+ const llama_batch & batch_inp,
27
+ const llama_vocab & vocab,
28
+ const llama_memory_i * memory,
29
+ uint32_t n_embd,
30
+ uint32_t n_seq_max,
31
+ bool output_all) {
32
+ clear();
33
+
34
+ batch = batch_inp;
35
+
36
+ this->vocab = &vocab;
37
+
38
+ GGML_ASSERT(batch.n_tokens > 0);
39
+
40
+ //
41
+ // validate input batch
42
+ //
43
+
44
+ if (n_seq_max > LLAMA_MAX_SEQ) {
45
+ LLAMA_LOG_ERROR("%s: n_seq_max = %d > %d\n", __func__, n_seq_max, LLAMA_MAX_SEQ);
46
+ return false;
47
+ }
48
+
49
+ if (batch.token) {
50
+ for (int32_t i = 0; i < batch.n_tokens; ++i) {
51
+ if (batch.token[i] < 0 || (uint32_t) batch.token[i] >= vocab.n_tokens()) {
52
+ LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]);
53
+ return false;
51
54
  }
52
- } else {
53
- // simple split
54
- ubatch.token = batch->token + seq.offset;
55
55
  }
56
- } else {
57
- ubatch.token = nullptr;
58
- }
59
- if (batch->embd) {
60
- if (ubatch.equal_seqs) {
61
- for (size_t i = 0; i < length; ++i) {
62
- memcpy(
63
- ubatch.embd + (n_embd * (ubatch.n_tokens + i)),
64
- batch->embd + (n_embd * ids[seq.offset + i]),
65
- n_embd * sizeof(float)
66
- );
56
+ }
57
+
58
+ if (batch.seq_id) {
59
+ for (int32_t i = 0; i < batch.n_tokens; ++i) {
60
+ for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
61
+ if (batch.seq_id && (batch.seq_id[i][s] < 0 || batch.seq_id[i][s] >= (llama_seq_id) n_seq_max)) {
62
+ LLAMA_LOG_ERROR("%s: invalid seq_id[%d][%d] = %d >= %d\n", __func__, i, s, batch.seq_id[i][s], (llama_seq_id) n_seq_max);
63
+ return false;
64
+ }
67
65
  }
68
- } else {
69
- // simple split
70
- ubatch.embd = batch->embd + (n_embd * seq.offset);
71
66
  }
72
- } else {
73
- ubatch.embd = nullptr;
74
67
  }
75
- if (ubatch.equal_seqs) {
76
- for (size_t i = 0; i < length; ++i) {
77
- ubatch.pos[ubatch.n_tokens + i] = batch->pos[ids[seq.offset + i]];
68
+
69
+ //
70
+ // auto-generate missing fields
71
+ //
72
+
73
+ if (!batch.n_seq_id) {
74
+ n_seq_id.resize(batch.n_tokens);
75
+ for (int32_t i = 0; i < batch.n_tokens; i++) {
76
+ n_seq_id[i] = seq_id_0.size();
78
77
  }
79
- } else {
80
- // simple split
81
- ubatch.pos = batch->pos + seq.offset;
78
+ batch.n_seq_id = n_seq_id.data();
82
79
  }
83
- if (ubatch.equal_seqs) {
84
- ubatch.n_seq_id[ubatch.n_seqs] = seq.n_seq_id;
85
- if (seq.seq_id) {
86
- ubatch.seq_id[ubatch.n_seqs] = seq.seq_id;
80
+
81
+ if (!batch.seq_id) {
82
+ seq_id.resize(batch.n_tokens + 1);
83
+ seq_id[batch.n_tokens] = NULL;
84
+ for (int32_t i = 0; i < batch.n_tokens; i++) {
85
+ seq_id[i] = seq_id_0.data();
87
86
  }
88
- } else {
89
- // simple split
90
- if (batch->n_seq_id) {
91
- ubatch.n_seq_id = batch->n_seq_id + seq.offset;
92
- } else {
93
- for (size_t i = 0; i < length; ++i) {
94
- ubatch.n_seq_id[ubatch.n_seqs + i] = 1;
87
+ batch.seq_id = seq_id.data();
88
+ }
89
+
90
+ if (!batch.pos) {
91
+ pos.resize(batch.n_tokens);
92
+
93
+ // initialize the starting position for each sequence based on the positions in the memory
94
+ llama_pos p0[LLAMA_MAX_SEQ];
95
+ for (uint32_t s = 0; s < n_seq_max; ++s) {
96
+ if (!memory) {
97
+ // if no memory -> start from 0
98
+ p0[s] = 0;
99
+ } else {
100
+ p0[s] = memory->seq_pos_max(s) + 1;
95
101
  }
96
102
  }
97
- if (batch->seq_id) {
98
- ubatch.seq_id = batch->seq_id + seq.offset;
103
+
104
+ for (int32_t i = 0; i < batch.n_tokens; i++) {
105
+ const llama_seq_id seq_id = batch.seq_id[i][0];
106
+
107
+ pos[i] = p0[seq_id];
108
+
109
+ // update the starting position for all sequences that are assigned to the this token
110
+ for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
111
+ const llama_seq_id seq_id = batch.seq_id[i][s];
112
+
113
+ p0[seq_id] = pos[i] + 1;
114
+ }
99
115
  }
116
+
117
+ batch.pos = pos.data();
100
118
  }
101
- if (logits_all) {
102
- for (size_t i = 0; i < length; ++i) {
103
- ubatch.output[ubatch.n_tokens + i] = 1;
104
- out_ids.push_back(ids[seq.offset + i]);
119
+
120
+ if (!batch.logits) {
121
+ if (output_all) {
122
+ // return the output for all tokens
123
+ output.resize(batch.n_tokens, true);
124
+ } else {
125
+ // return the output only for the last token
126
+ output.resize(batch.n_tokens, false);
127
+ output[output.size() - 1] = true;
105
128
  }
106
- } else if (batch->logits) {
107
- if (ubatch.equal_seqs) {
108
- for (size_t i = 0; i < length; ++i) {
109
- size_t id = ids[seq.offset + i];
110
- int8_t is_output = batch->logits[id];
111
- ubatch.output[ubatch.n_tokens + i] = is_output;
112
- if (is_output) { out_ids.push_back(id); }
129
+
130
+ batch.logits = output.data();
131
+ } else if (output_all) {
132
+ bool warn = false;
133
+
134
+ for (int32_t i = 0; i < batch.n_tokens; ++i) {
135
+ if (batch.logits[i] == 0) {
136
+ warn = true;
113
137
  }
114
- } else {
115
- // simple split
116
- ubatch.output = batch->logits + seq.offset;
117
- for (size_t i = 0; i < length; ++i) {
118
- if (ubatch.output[i] != 0) { out_ids.push_back(seq.offset + i); }
138
+ }
139
+
140
+ if (warn) {
141
+ LLAMA_LOG_WARN("%s: embeddings required but some input tokens were not marked as outputs -> overriding\n", __func__);
142
+
143
+ output.resize(batch.n_tokens, true);
144
+ batch.logits = output.data();
145
+ }
146
+ }
147
+
148
+ //
149
+ // compute stats
150
+ //
151
+
152
+ this->n_embd = n_embd;
153
+ this->n_seq_max = n_seq_max;
154
+
155
+ // count the outputs in this batch
156
+ for (int32_t i = 0; i < batch.n_tokens; ++i) {
157
+ n_outputs += batch.logits[i] != 0;
158
+ }
159
+
160
+ has_cpl = false;
161
+
162
+ // determine coupled sequences
163
+ // these are pairs of sequences that have at least one token in the input batch that is assigned to both of them
164
+ for (int32_t i = 0; i < batch.n_tokens; ++i) {
165
+ const llama_seq_id s0 = batch.seq_id[i][0];
166
+
167
+ for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
168
+ const llama_seq_id s1 = batch.seq_id[i][s];
169
+
170
+ seq_pos[s1].insert(batch.pos[i]);
171
+
172
+ if (s > 0) {
173
+ // mark that sequence s1 is coupled to s0
174
+ seq_cpl[s1][s0] = true;
175
+
176
+ // note: tracking the other way around is not necessary for now
177
+ //seq_cpl[s0][s1] = true;
178
+
179
+ has_cpl = true;
119
180
  }
120
181
  }
121
- } else {
122
- // only get last output
123
- for (size_t i = 0; i < length; ++i) {
124
- size_t id = ids[seq.offset + i];
125
- int8_t is_last = id == ids.size() - 1;
126
- ubatch.output[ubatch.n_tokens + i] = is_last;
127
- if (is_last) { out_ids.push_back(id); }
128
- }
129
- }
130
- if (ubatch.n_tokens == 0 && ubatch.n_seqs == 0) {
131
- ubatch.n_seq_tokens = ubatch.equal_seqs ? length : 1;
132
- }
133
- ubatch.n_tokens += length;
134
- ubatch.n_seqs += ubatch.equal_seqs ? 1 : length; // virtual sequences for simple splits
135
- seq.offset += length;
136
- seq.length -= length;
137
- n_tokens -= length;
138
- GGML_ASSERT(ubatch.n_tokens == ubatch.n_seq_tokens * ubatch.n_seqs);
139
- }
182
+ }
140
183
 
141
- llama_ubatch llama_sbatch::split_simple(size_t n_ubatch) {
142
- n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
143
- llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
144
- ubatch.equal_seqs = false;
145
- if (!seq.empty()) {
146
- llama_sbatch_seq & s = seq[0];
147
- size_t length = s.length < n_ubatch ? s.length : n_ubatch;
148
- GGML_ASSERT(seq.size() == 1 && s.n_seq_id == 0); // don't mix with other splits
149
- add_seq_to_ubatch(ubatch, s, length);
150
- }
151
- return ubatch;
152
- }
184
+ // precompute the sequence sets for each token and determine the unique sequence ids that participate in the batch
185
+ {
186
+ seq_set_t seq_set_unq;
187
+
188
+ for (int32_t i = 0; i < batch.n_tokens; ++i) {
189
+ seq_set_t cur;
190
+ for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
191
+ const llama_seq_id seq_id = batch.seq_id[i][s];
153
192
 
154
- llama_ubatch llama_sbatch::split_equal(size_t n_ubatch) {
155
- n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
156
- llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
157
- if (!seq.empty()) {
158
- size_t length = 0;
159
- size_t n_tokens_in_ubatch = 0;
160
- GGML_ASSERT(seq[0].n_seq_id > 0); // should not be mixed with simple splits
161
- // smallest first, because it's easier to split this way;
162
- // starting from the end to pop in constant time.
163
- for (size_t i = seq.size(); i-- > 0;) {
164
- llama_sbatch_seq & s = seq[i];
165
- GGML_ASSERT(s.length > 0);
166
- if (length == 0) {
167
- length = s.length < n_ubatch ? s.length : n_ubatch;
193
+ cur .set(seq_id);
194
+ seq_set_unq.set(seq_id);
195
+ }
196
+
197
+ seq_set.push_back(cur);
198
+ seq_set_map[cur].push_back(i);
199
+ }
200
+
201
+ for (uint32_t s = 0; s < n_seq_max; ++s) {
202
+ if (seq_set_unq.test(s)) {
203
+ seq_idx[s] = seq_id_unq.size();
204
+ seq_id_unq.push_back(s);
168
205
  }
169
- add_seq_to_ubatch(ubatch, s, length);
170
- n_tokens_in_ubatch += length;
171
- // shared prompts can't be mixed with any of their sequences,
172
- // so it's safer to compute them in their own ubatch
173
- if (s.n_seq_id > 1) { break; }
174
- // stop when there isn't enough space for another sequence
175
- if (length + n_tokens_in_ubatch > n_ubatch) { break; }
176
206
  }
177
207
  }
178
- return ubatch;
179
- }
180
208
 
181
- llama_ubatch llama_sbatch::split_seq(size_t n_ubatch) {
182
- n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
183
- llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
184
- if (!seq.empty()) {
185
- llama_sbatch_seq & s = seq[seq.size() - 1];
186
- size_t length = s.length < n_ubatch ? s.length : n_ubatch;
187
- GGML_ASSERT(s.n_seq_id > 0); // should not be mixed with simple splits
188
- add_seq_to_ubatch(ubatch, s, length);
209
+ if (debug > 0) {
210
+ LLAMA_LOG_DEBUG("%s: input batch info:\n", __func__);
211
+
212
+ llama_ubatch ubatch {
213
+ /*.b_equal_seqs =*/ false,
214
+ /*.n_tokens =*/ (uint32_t) batch.n_tokens,
215
+ /*.n_seq_tokens =*/ (uint32_t) 1,
216
+ /*.n_seqs =*/ (uint32_t) batch.n_tokens,
217
+ /*.n_seqs_unq =*/ (uint32_t) this->seq_id_unq.size(),
218
+ /*.token =*/ batch.token,
219
+ /*.embd =*/ batch.embd,
220
+ /*.pos =*/ batch.pos,
221
+ /*.n_seq_id =*/ batch.n_seq_id,
222
+ /*.seq_id =*/ batch.seq_id,
223
+ /*.seq_id_unq =*/ this->seq_id_unq.data(),
224
+ /*.seq_idx =*/ this->seq_idx.data(),
225
+ /*.output =*/ batch.logits,
226
+ /*.data =*/ {},
227
+ };
228
+
229
+ ubatch_print(ubatch, debug);
230
+
231
+ LLAMA_LOG_DEBUG("%s: seq = [\n", __func__);
232
+ for (int s0 = 0; s0 < (int) seq_pos.size(); ++s0) {
233
+ if (seq_pos[s0].empty()) {
234
+ continue;
235
+ }
236
+
237
+ std::stringstream ss;
238
+ for (int s1 = 0; s1 < (int) seq_cpl[s0].size(); ++s1) {
239
+ if (seq_cpl[s0][s1]) {
240
+ ss << s1 << " ";
241
+ }
242
+ }
243
+
244
+ LLAMA_LOG_DEBUG("%s: %4d: pos = [%4d, %4d], cpl = %s\n",
245
+ __func__, s0, seq_pos_min(s0), seq_pos_max(s0), ss.str().empty() ? "-" : ss.str().c_str());
246
+ }
247
+ LLAMA_LOG_DEBUG("%s: ]\n", __func__);
189
248
  }
190
- return ubatch;
191
- }
192
249
 
193
- llama_sbatch::llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split, bool logits_all) {
194
- GGML_ASSERT(batch.n_tokens >= 0);
195
- this->batch = &batch;
196
- this->n_embd = n_embd;
197
- this->logits_all = logits_all;
250
+ //
251
+ // consistency checks
252
+ //
198
253
 
199
- n_tokens = batch.n_tokens;
200
- ids.resize(n_tokens);
201
- out_ids.clear();
202
- // TODO: reserve out_ids and seq
203
-
204
- for (size_t i = 0; i < n_tokens; ++i) {
205
- ids[i] = i;
206
- }
207
-
208
- if (simple_split) {
209
- seq.resize(1);
210
- llama_sbatch_seq & s = seq[0];
211
- s.n_seq_id = 0;
212
- s.seq_id = nullptr;
213
- s.offset = 0;
214
- s.length = n_tokens;
215
- return;
216
- }
217
-
218
- std::sort(ids.begin(), ids.end(),
219
- [&batch](size_t a, size_t b) {
220
- int32_t n_seq_a = batch.n_seq_id ? batch.n_seq_id[a] : 1;
221
- int32_t n_seq_b = batch.n_seq_id ? batch.n_seq_id[b] : 1;
222
- // sort by seq_id, then by pos
223
- if (n_seq_a == n_seq_b) {
224
- if (batch.seq_id) {
225
- for (int32_t i = 0; i < n_seq_a; ++i) {
226
- llama_seq_id seq_id_a = batch.seq_id[a][i];
227
- llama_seq_id seq_id_b = batch.seq_id[b][i];
228
- // smaller seq_ids go first
229
- if (seq_id_a != seq_id_b) {
230
- return seq_id_a < seq_id_b;
231
- }
232
- }
233
- }
234
- // when all else is equal, sort by pos
235
- if (batch.pos) {
236
- return batch.pos[a] < batch.pos[b];
254
+ for (uint32_t s = 0; s < n_seq_max; ++s) {
255
+ if (seq_pos[s].empty()) {
256
+ continue;
257
+ }
258
+
259
+ const llama_pos p0 = memory ? memory->seq_pos_max(s) : -1;
260
+
261
+ if (p0 >= 0) {
262
+ bool ok = true;
263
+
264
+ if (batch.token) {
265
+ if (seq_pos_min(s) != p0 + 1) {
266
+ ok = false;
267
+ }
268
+ } else {
269
+ assert(batch.embd);
270
+
271
+ // for embeddings (typically used as vision input), we allow them to have repeating positions
272
+ // ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762
273
+ if (seq_pos_min(s) != p0 && seq_pos_min(s) != p0 + 1) {
274
+ ok = false;
275
+ }
276
+ }
277
+
278
+ if (!ok) {
279
+ LLAMA_LOG_ERROR(
280
+ "%s: the tokens of sequence %d in the input batch have inconsistent sequence positions:\n"
281
+ " - the last position stored in the memory module of the context (i.e. the KV cache) for sequence %d is X = %d\n"
282
+ " - the tokens for sequence %d in the input batch have a starting position of Y = %d\n"
283
+ " it is required that the sequence positions remain consecutive: Y = X + 1\n",
284
+ __func__, s, s, p0, s, seq_pos_min(s));
285
+
286
+ return false;
287
+ }
288
+ }
289
+
290
+ if (seq_pos_max(s) - seq_pos_min(s) + 1 > (int) seq_pos[s].size()) {
291
+ LLAMA_LOG_ERROR("%s: sequence %d positions are not continuous\n", __func__, s);
292
+ return false;
293
+ }
294
+ }
295
+
296
+ if (memory) {
297
+ for (uint32_t s0 = 0; s0 < n_seq_max; ++s0) {
298
+ for (uint32_t s1 = 0; s1 < n_seq_max; ++s1) {
299
+ if (seq_cpl[s0][s1]) {
300
+ if (memory->seq_pos_min(s0) != memory->seq_pos_min(s1) ||
301
+ memory->seq_pos_max(s0) != memory->seq_pos_max(s1)) {
302
+ LLAMA_LOG_ERROR("%s: sequence %d is coupled to %d in the input batch, but have divereged\n", __func__, s0, s1);
303
+ return false;
237
304
  }
238
- // no pos, sort by id
239
- return a < b;
240
305
  }
241
- // shared prompts go first
242
- return n_seq_a > n_seq_b;
243
306
  }
244
- );
245
-
246
- // init seq
247
- llama_sbatch_seq * last_seq = nullptr;
248
-
249
- for (size_t i = 0; i < n_tokens; ++i) {
250
- const size_t bi = ids[i];
251
- const int32_t n_seqs = batch.n_seq_id[bi];
252
- llama_seq_id * seq_ids = batch.seq_id[bi];
253
- if (last_seq != nullptr) {
254
- bool same = n_seqs == last_seq->n_seq_id;
255
- for (int32_t j = 0; same && j < n_seqs; ++j) {
256
- if (seq_ids[j] != last_seq->seq_id[j]) {
257
- same = false;
307
+ }
308
+ }
309
+
310
+ // disallow partial sequence sub-sets:
311
+ //
312
+ // invalid: x
313
+ // i: 0 1 2 ...
314
+ // ---------------------------------------
315
+ // seq_id[i][0]: 0 0 1
316
+ // seq_id[i][1]: 1 1 2
317
+ // seq_id[i][2]: 2
318
+ //
319
+ // disallow decreasing sequence positions:
320
+ //
321
+ // invalid: x
322
+ // i: 0 1 2 3 4 5 6 ...
323
+ // ---------------------------------------
324
+ // pos[i]: 4 5 0 1 6 2 3
325
+ // seq_id[i][0]: 0 0 1 1 0 1 0
326
+ //
327
+ {
328
+ seq_set_t cur_seq_set[LLAMA_MAX_SEQ];
329
+ for (uint32_t s = 0; s < n_seq_max; ++s) {
330
+ cur_seq_set[s].set();
331
+ }
332
+
333
+ llama_pos cur_seq_pos[LLAMA_MAX_SEQ];
334
+ for (uint32_t s = 0; s < n_seq_max; ++s) {
335
+ cur_seq_pos[s] = -1;
336
+ }
337
+
338
+ for (int32_t i = 0; i < batch.n_tokens; ++i) {
339
+ const llama_pos pos = batch.pos[i];
340
+
341
+ for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
342
+ const llama_seq_id seq_id = batch.seq_id[i][s];
343
+
344
+ cur_seq_set[seq_id] &= seq_set[i];
345
+
346
+ if (cur_seq_set[seq_id].none()) {
347
+ LLAMA_LOG_ERROR("%s: sequence %d belongs to incompatible sequence sets (not allowed)\n", __func__, seq_id);
348
+ return false;
349
+ }
350
+
351
+ if (pos < cur_seq_pos[seq_id]) {
352
+ LLAMA_LOG_ERROR("%s: sequence %d positions are decreasing (not allowed)\n", __func__, seq_id);
353
+ return false;
258
354
  }
259
355
  }
260
- if (same) {
261
- last_seq->length += 1;
262
- continue;
356
+ }
357
+ }
358
+
359
+ split_reset();
360
+
361
+ return true;
362
+ }
363
+
364
+ llama_ubatch llama_batch_allocr::ubatch_reserve(uint32_t n_seq_tokens, uint32_t n_seqs) {
365
+ const uint32_t n_tokens = n_seq_tokens*n_seqs;
366
+
367
+ clear();
368
+ split_reset();
369
+
370
+ auto udata = std::make_shared<llama_ubatch::data_t>();
371
+
372
+ udata->token .resize(n_tokens);
373
+ udata->embd .clear();
374
+ udata->pos .resize(n_tokens);
375
+ udata->n_seq_id .resize(n_tokens);
376
+ udata->seq_id .resize(n_tokens);
377
+ udata->seq_id_unq.resize(0);
378
+ udata->seq_idx .resize(LLAMA_MAX_SEQ, -1);
379
+ udata->output .resize(n_tokens);
380
+
381
+ for (uint32_t s = 0; s < n_seqs; ++s) {
382
+ udata->seq_idx[s] = s;
383
+ udata->seq_id_unq.push_back(s);
384
+ }
385
+
386
+ llama_ubatch res {
387
+ /*.b_equal_seqs =*/ true,
388
+ /*.n_tokens =*/ n_tokens,
389
+ /*.n_seq_tokens =*/ n_seq_tokens,
390
+ /*.n_seqs =*/ n_seqs,
391
+ /*.n_seqs_unq =*/ n_seqs,
392
+
393
+ /*.token =*/ udata->token.data(),
394
+ /*.embd =*/ nullptr,
395
+ /*.pos =*/ udata->pos.data(),
396
+ /*.n_seq_id =*/ udata->n_seq_id.data(),
397
+ /*.seq_id =*/ udata->seq_id.data(),
398
+ /*.seq_id_unq =*/ udata->seq_id_unq.data(),
399
+ /*.seq_idx =*/ udata->seq_idx.data(),
400
+ /*.output =*/ udata->output.data(),
401
+ /*.data =*/ std::move(udata),
402
+ };
403
+
404
+ return res;
405
+ }
406
+
407
+ const llama_batch & llama_batch_allocr::get_batch() const {
408
+ return batch;
409
+ }
410
+
411
+ uint32_t llama_batch_allocr::get_n_tokens() const {
412
+ return batch.n_tokens;
413
+ }
414
+
415
+ uint32_t llama_batch_allocr::get_n_outputs() const {
416
+ return n_outputs;
417
+ }
418
+
419
+ uint32_t llama_batch_allocr::get_n_used() const {
420
+ return n_used;
421
+ }
422
+
423
+ std::vector<int32_t> & llama_batch_allocr::get_out_ids() {
424
+ return out_ids;
425
+ }
426
+
427
+ llama_pos llama_batch_allocr::seq_pos_min(llama_seq_id seq_id) const {
428
+ return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].begin();
429
+ }
430
+
431
+ llama_pos llama_batch_allocr::seq_pos_max(llama_seq_id seq_id) const {
432
+ return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].rbegin();
433
+ }
434
+
435
+ void llama_batch_allocr::split_reset() {
436
+ out_ids.clear();
437
+
438
+ n_used = 0;
439
+
440
+ used.clear();
441
+ used.resize(get_n_tokens(), false);
442
+ }
443
+
444
+ llama_ubatch llama_batch_allocr::split_simple(uint32_t n_ubatch) {
445
+ // find the first unused token
446
+ uint32_t cur_idx = 0;
447
+ while (cur_idx < used.size() && used[cur_idx]) {
448
+ ++cur_idx;
449
+ }
450
+
451
+ // we are done
452
+ if (cur_idx >= used.size()) {
453
+ return {};
454
+ }
455
+
456
+ std::vector<int32_t> idxs;
457
+
458
+ while (true) {
459
+ idxs.push_back(cur_idx);
460
+
461
+ used[cur_idx] = true;
462
+ ++n_used;
463
+
464
+ ++cur_idx;
465
+
466
+ if (cur_idx >= used.size()) {
467
+ break;
468
+ }
469
+
470
+ if (idxs.size() >= n_ubatch) {
471
+ break;
472
+ }
473
+ }
474
+
475
+ return ubatch_add(idxs, idxs.size(), false);
476
+ }
477
+
478
+ llama_ubatch llama_batch_allocr::split_equal(uint32_t n_ubatch, bool sequential) {
479
+ if (sequential && has_cpl) {
480
+ LLAMA_LOG_ERROR("%s: sequential split is not supported when there are coupled sequences in the input batch (you may need to use the -kvu flag)\n", __func__);
481
+
482
+ return {};
483
+ }
484
+
485
+ std::vector<seq_set_t> cur_seq_set;
486
+
487
+ llama_seq_id last_seq_id = -1;
488
+
489
+ // determine the non-overlapping sequence sets participating in this ubatch
490
+ for (int32_t i = 0; i < batch.n_tokens; ++i) {
491
+ if (used[i]) {
492
+ continue;
493
+ }
494
+
495
+ bool add = true;
496
+
497
+ for (uint32_t s = 0; s < cur_seq_set.size(); ++s) {
498
+ // no overlap with existing sequence sets:
499
+ if (!(cur_seq_set[s] & seq_set[i]).none()) {
500
+ add = false;
501
+ break;
502
+ }
503
+ }
504
+
505
+ // accept only increasing sequence ids
506
+ if (sequential) {
507
+ add = add && (cur_seq_set.empty() || batch.seq_id[i][0] == last_seq_id + 1);
508
+ }
509
+
510
+ if (add) {
511
+ cur_seq_set.push_back(seq_set[i]);
512
+
513
+ last_seq_id = batch.seq_id[i][0];
514
+
515
+ if (cur_seq_set.size() > n_ubatch) {
516
+ break;
263
517
  }
264
518
  }
265
- llama_sbatch_seq new_seq = {n_seqs, seq_ids, i, 1};
266
- seq.push_back(new_seq);
267
- last_seq = &seq.back();
268
519
  }
269
520
 
270
- // keep shared prompts first at the end, then sort by length descending.
271
- std::sort(seq.begin(), seq.end(),
272
- [](llama_sbatch_seq & a, llama_sbatch_seq & b) {
273
- if (a.n_seq_id == b.n_seq_id) {
274
- return a.length > b.length;
275
- }
276
- return a.n_seq_id < b.n_seq_id;
521
+ const uint32_t n_seqs = cur_seq_set.size();
522
+
523
+ // we are done
524
+ if (n_seqs == 0) {
525
+ return {};
526
+ }
527
+
528
+ // the current batch index of each sequence set
529
+ std::vector<int32_t> cur_idx(n_seqs, 0);
530
+
531
+ for (uint32_t s = 0; s < n_seqs; ++s) {
532
+ while (used[seq_set_map[cur_seq_set[s]][cur_idx[s]]]) {
533
+ ++cur_idx[s];
534
+ }
535
+ }
536
+
537
+ // the list of batch indices for each sequence set
538
+ // at the end we will concat these to get the final ubatch
539
+ std::vector<idx_vec_t> idxs_per_seq(n_seqs);
540
+
541
+ while (true) {
542
+ // we can only add new n_seq_tokens tokens if all the sequence sets have at least one more unused token and
543
+ // if we haven't reached n_ubatch
544
+ bool can_expand = true;
545
+
546
+ for (uint32_t s = 0; s < n_seqs; ++s) {
547
+ if (cur_idx[s] >= (int32_t) seq_set_map[cur_seq_set[s]].size()) {
548
+ can_expand = false;
549
+ break;
277
550
  }
278
- );
551
+ }
552
+
553
+ if (!can_expand) {
554
+ break;
555
+ }
556
+
557
+ for (uint32_t s = 0; s < n_seqs; ++s) {
558
+ const int32_t idx = seq_set_map[cur_seq_set[s]][cur_idx[s]];
559
+
560
+ idxs_per_seq[s].push_back(idx);
561
+
562
+ used[idx] = true;
563
+ ++n_used;
564
+
565
+ ++cur_idx[s];
566
+ }
567
+
568
+ if ((idxs_per_seq[0].size() + 1)*n_seqs > n_ubatch) {
569
+ break;
570
+ }
571
+ }
572
+
573
+ // concat the per-sequence-set lists
574
+ std::vector<int32_t> idxs;
575
+
576
+ for (uint32_t s = 0; s < n_seqs; ++s) {
577
+ idxs.insert(idxs.end(), idxs_per_seq[s].begin(), idxs_per_seq[s].end());
578
+ }
579
+
580
+ return ubatch_add(idxs, n_seqs, true);
279
581
  }
280
582
 
281
- llama_batch_allocr::llama_batch_allocr(struct llama_batch in_batch, llama_pos p0) {
282
- batch = in_batch;
283
- GGML_ASSERT(batch.n_tokens > 0);
284
- if (!batch.pos) {
285
- assert(p0 >= 0);
286
- pos.resize(batch.n_tokens);
287
- for (int32_t i = 0; i < batch.n_tokens; i++) {
288
- pos[i] = p0 + i;
583
+ llama_ubatch llama_batch_allocr::split_seq(uint32_t n_ubatch) {
584
+ // find the first unused token
585
+ uint32_t cur_idx = 0;
586
+ while (cur_idx < used.size() && used[cur_idx]) {
587
+ ++cur_idx;
588
+ }
589
+
590
+ // we are done
591
+ if (cur_idx >= used.size()) {
592
+ return {};
593
+ }
594
+
595
+ // this is the starting sequence set
596
+ // we allow adding tokens only if their sequence set is a subset of the current sequence set
597
+ auto cur_seq_set = seq_set[cur_idx];
598
+
599
+ std::vector<int32_t> idxs;
600
+
601
+ while (true) {
602
+ idxs.push_back(cur_idx);
603
+
604
+ used[cur_idx] = true;
605
+ ++n_used;
606
+
607
+ if (idxs.size() >= n_ubatch) {
608
+ break;
289
609
  }
290
- batch.pos = pos.data();
610
+
611
+ do {
612
+ ++cur_idx;
613
+ } while (cur_idx < get_n_tokens() && (used[cur_idx] || ((cur_seq_set & seq_set[cur_idx]) != seq_set[cur_idx])));
614
+
615
+ if (cur_idx == get_n_tokens()) {
616
+ break;
617
+ }
618
+
619
+ cur_seq_set = seq_set[cur_idx];
291
620
  }
292
- if (!batch.n_seq_id) {
293
- n_seq_id.resize(batch.n_tokens);
294
- for (int32_t i = 0; i < batch.n_tokens; i++) {
295
- n_seq_id[i] = seq_id_0.size();
621
+
622
+ return ubatch_add(idxs, 1, true);
623
+ }
624
+
625
+ void llama_batch_allocr::clear() {
626
+ n_outputs = 0;
627
+
628
+ batch = {};
629
+
630
+ pos .clear();
631
+ n_seq_id .clear();
632
+ seq_id .clear();
633
+ seq_id_unq.clear();
634
+ output .clear();
635
+
636
+ for (auto & cur : seq_pos) {
637
+ cur.clear();
638
+ }
639
+
640
+ for (auto & cur : seq_cpl) {
641
+ std::fill(cur.begin(), cur.end(), false);
642
+ }
643
+
644
+ seq_set.clear();
645
+
646
+ seq_set_map.clear();
647
+
648
+ std::fill(seq_idx.begin(), seq_idx.end(), -1);
649
+ }
650
+
651
+ llama_ubatch llama_batch_allocr::ubatch_add(const std::vector<int32_t> & idxs, uint32_t n_seqs, bool equal_seqs) {
652
+ const uint32_t n_tokens = idxs.size();
653
+
654
+ assert(n_tokens%n_seqs == 0);
655
+
656
+ auto udata = std::make_shared<llama_ubatch::data_t>();
657
+
658
+ const int32_t n_pos_cur = batch.embd ? n_pos_per_embd : 1;
659
+
660
+ const int64_t n_embd_all = batch.embd ? (int64_t) n_tokens*n_embd : 0;
661
+ const int64_t n_pos_all = (int64_t) n_tokens*n_pos_cur;
662
+
663
+ udata->token .resize(n_tokens);
664
+ udata->embd .resize(n_embd_all);
665
+ udata->pos .resize(n_pos_all);
666
+ udata->n_seq_id .resize(n_tokens);
667
+ udata->seq_id .resize(n_tokens);
668
+ udata->seq_id_unq.resize(0);
669
+ udata->seq_idx .resize(LLAMA_MAX_SEQ, -1);
670
+ udata->output .resize(n_tokens);
671
+
672
+ seq_set_t seq_set_unq;
673
+
674
+ for (size_t i = 0; i < idxs.size(); ++i) {
675
+ if (batch.token) {
676
+ udata->token[i] = batch.token[idxs[i]];
677
+ }
678
+
679
+ if (batch.embd) {
680
+ memcpy(udata->embd.data() + i*n_embd, batch.embd + (int64_t) idxs[i]*n_embd, n_embd*sizeof(float));
681
+ }
682
+
683
+ for (int j = 0; j < n_pos_cur; ++j) {
684
+ udata->pos[j*n_tokens + i] = batch.pos[j*batch.n_tokens + idxs[i]];
685
+ }
686
+
687
+ udata->n_seq_id[i] = batch.n_seq_id[idxs[i]];
688
+ udata->seq_id[i] = batch.seq_id[idxs[i]];
689
+ udata->output[i] = batch.logits[idxs[i]];
690
+
691
+ for (int s = 0; s < udata->n_seq_id[i]; ++s) {
692
+ seq_set_unq.set(udata->seq_id[i][s]);
693
+ }
694
+
695
+ if (udata->output[i]) {
696
+ out_ids.push_back(idxs[i]);
296
697
  }
297
- batch.n_seq_id = n_seq_id.data();
298
698
  }
299
- if (!batch.seq_id) {
300
- seq_id.resize(batch.n_tokens + 1);
301
- seq_id[batch.n_tokens] = NULL;
302
- for (int32_t i = 0; i < batch.n_tokens; i++) {
303
- seq_id[i] = seq_id_0.data();
699
+
700
+ for (uint32_t s = 0; s < n_seq_max; ++s) {
701
+ if (seq_set_unq.test(s)) {
702
+ udata->seq_idx[s] = udata->seq_id_unq.size();
703
+ udata->seq_id_unq.push_back(s);
304
704
  }
305
- batch.seq_id = seq_id.data();
306
705
  }
307
- if (!batch.logits) {
308
- logits.resize(batch.n_tokens);
309
- logits[logits.size() - 1] = true;
310
- batch.logits = logits.data();
706
+
707
+ llama_ubatch res {
708
+ /*.b_equal_seqs =*/ equal_seqs,
709
+ /*.n_tokens =*/ n_tokens,
710
+ /*.n_seq_tokens =*/ n_tokens/n_seqs,
711
+ /*.n_seqs =*/ n_seqs,
712
+ /*.n_seqs_unq =*/ (uint32_t) udata->seq_id_unq.size(),
713
+
714
+ /*.token =*/ batch.token ? udata->token.data() : nullptr,
715
+ /*.embd =*/ batch.embd ? udata->embd.data() : nullptr,
716
+ /*.pos =*/ udata->pos.data(),
717
+ /*.n_seq_id =*/ udata->n_seq_id.data(),
718
+ /*.seq_id =*/ udata->seq_id.data(),
719
+ /*.seq_id_unq =*/ udata->seq_id_unq.data(),
720
+ /*.seq_idx =*/ udata->seq_idx.data(),
721
+ /*.output =*/ udata->output.data(),
722
+ /*.data =*/ std::move(udata),
723
+ };
724
+
725
+ if (debug > 0) {
726
+ LLAMA_LOG_DEBUG("%s: added ubatch to split:\n", __func__);
727
+
728
+ ubatch_print(res, debug);
729
+ }
730
+
731
+ return res;
732
+ }
733
+
734
+ void llama_batch_allocr::ubatch_print(const llama_ubatch & ubatch, int debug) {
735
+ if (debug > 0) {
736
+ LLAMA_LOG_DEBUG("%s: equal_seqs = %d\n", __func__, ubatch.equal_seqs());
737
+ LLAMA_LOG_DEBUG("%s: n_tokens = %d\n", __func__, ubatch.n_tokens);
738
+ LLAMA_LOG_DEBUG("%s: n_seq_tokens = %d\n", __func__, ubatch.n_seq_tokens);
739
+ LLAMA_LOG_DEBUG("%s: n_seqs = %d\n", __func__, ubatch.n_seqs);
740
+ LLAMA_LOG_DEBUG("%s: n_seqs_unq = %d\n", __func__, ubatch.n_seqs_unq);
741
+
742
+ std::stringstream ss_seq_id_unq;
743
+ std::stringstream ss_seq_idx;
744
+
745
+ ss_seq_id_unq << "[ ";
746
+ ss_seq_idx << "[";
747
+
748
+ for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
749
+ ss_seq_id_unq << ubatch.seq_id_unq[s] << " ";
750
+ }
751
+
752
+ for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
753
+ if (ubatch.seq_idx[s] >= 0) {
754
+ ss_seq_idx << ubatch.seq_idx[s]%10;
755
+ } else {
756
+ ss_seq_idx << ".";
757
+ }
758
+ }
759
+
760
+ ss_seq_id_unq << "]";
761
+ ss_seq_idx << "]";
762
+
763
+ LLAMA_LOG_DEBUG("%s: token = %p\n", __func__, (void *) ubatch.token);
764
+ LLAMA_LOG_DEBUG("%s: embd = %p\n", __func__, (void *) ubatch.embd);
765
+ LLAMA_LOG_DEBUG("%s: pos = %p\n", __func__, (void *) ubatch.pos);
766
+ LLAMA_LOG_DEBUG("%s: n_seq_id = %p\n", __func__, (void *) ubatch.n_seq_id);
767
+ LLAMA_LOG_DEBUG("%s: seq_id = %p\n", __func__, (void *) ubatch.seq_id);
768
+ LLAMA_LOG_DEBUG("%s: seq_id_unq = %s\n", __func__, ss_seq_id_unq.str().c_str());
769
+ LLAMA_LOG_DEBUG("%s: seq_idx = %s\n", __func__, ss_seq_idx.str().c_str());
770
+ LLAMA_LOG_DEBUG("%s: output = %p\n", __func__, (void *) ubatch.output);
771
+ LLAMA_LOG_DEBUG("%s: n_outputs = %d\n", __func__, n_outputs);
772
+
773
+ if (debug > 1) {
774
+ int seq_id_max = 0;
775
+ for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
776
+ for (int s = 0; s < ubatch.n_seq_id[i]; ++s) {
777
+ for (int s = 0; s < ubatch.n_seq_id[i]; ++s) {
778
+ seq_id_max = std::max(seq_id_max, ubatch.seq_id[i][s]);
779
+ }
780
+ }
781
+ }
782
+ ++seq_id_max;
783
+
784
+ LLAMA_LOG_DEBUG("%s: token = [\n", __func__);
785
+ for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
786
+ std::vector<int8_t> seq_id(seq_id_max);
787
+
788
+ for (int s = 0; s < ubatch.n_seq_id[i]; ++s) {
789
+ seq_id[ubatch.seq_id[i][s]] = 1;
790
+ }
791
+
792
+ std::stringstream ss;
793
+ for (int s = 0; s < seq_id_max; ++s) {
794
+ if (seq_id[s]) {
795
+ ss << s%10;
796
+ } else {
797
+ ss << ".";
798
+ }
799
+ }
800
+
801
+ if (ubatch.token) {
802
+ LLAMA_LOG_DEBUG("%s: %4d: id = %6d (%16s), pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n",
803
+ __func__, i, ubatch.token[i], vocab->token_to_piece(ubatch.token[i]).c_str(),
804
+ ubatch.pos[i], ubatch.n_seq_id[i], ss.str().c_str(), ubatch.output[i]);
805
+ } else {
806
+ LLAMA_LOG_DEBUG("%s: %4d: [embd], pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n",
807
+ __func__, i, ubatch.pos[i], ubatch.n_seq_id[i], ss.str().c_str(), ubatch.output[i]);
808
+ }
809
+ }
810
+ LLAMA_LOG_DEBUG("%s: ]\n", __func__);
811
+ }
311
812
  }
312
813
  }
313
814
 
@@ -319,25 +820,25 @@ struct llama_batch llama_batch_get_one(
319
820
  llama_token * tokens,
320
821
  int32_t n_tokens) {
321
822
  return {
322
- /*n_tokens =*/ n_tokens,
323
- /*tokens =*/ tokens,
324
- /*embd =*/ nullptr,
325
- /*pos =*/ nullptr,
326
- /*n_seq_id =*/ nullptr,
327
- /*seq_id =*/ nullptr,
328
- /*logits =*/ nullptr,
823
+ /*n_tokens =*/ n_tokens,
824
+ /*tokens =*/ tokens,
825
+ /*embd =*/ nullptr,
826
+ /*pos =*/ nullptr,
827
+ /*n_seq_id =*/ nullptr,
828
+ /*seq_id =*/ nullptr,
829
+ /*logits =*/ nullptr,
329
830
  };
330
831
  }
331
832
 
332
833
  struct llama_batch llama_batch_init(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) {
333
834
  llama_batch batch = {
334
- /*n_tokens =*/ 0,
335
- /*tokens =*/ nullptr,
336
- /*embd =*/ nullptr,
337
- /*pos =*/ nullptr,
338
- /*n_seq_id =*/ nullptr,
339
- /*seq_id =*/ nullptr,
340
- /*logits =*/ nullptr,
835
+ /*n_tokens =*/ 0,
836
+ /*tokens =*/ nullptr,
837
+ /*embd =*/ nullptr,
838
+ /*pos =*/ nullptr,
839
+ /*n_seq_id =*/ nullptr,
840
+ /*seq_id =*/ nullptr,
841
+ /*logits =*/ nullptr,
341
842
  };
342
843
 
343
844
  if (embd) {