whispercpp 1.3.1 → 1.3.2

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (797)
  1. checksums.yaml +4 -4
  2. data/.gitignore +4 -3
  3. data/README.md +92 -31
  4. data/Rakefile +26 -7
  5. data/ext/.gitignore +5 -7
  6. data/ext/dependencies.rb +61 -0
  7. data/ext/extconf.rb +21 -198
  8. data/ext/options.rb +221 -0
  9. data/ext/ruby_whisper.c +159 -0
  10. data/ext/ruby_whisper.h +17 -2
  11. data/ext/ruby_whisper_context.c +641 -0
  12. data/ext/ruby_whisper_error.c +52 -0
  13. data/ext/ruby_whisper_model.c +232 -0
  14. data/ext/ruby_whisper_params.c +1301 -0
  15. data/ext/ruby_whisper_segment.c +143 -0
  16. data/ext/ruby_whisper_transcribe.cpp +87 -0
  17. data/ext/ruby_whisper_vad_params.c +288 -0
  18. data/ext/sources/.dockerignore +3 -0
  19. data/ext/sources/.github/workflows/bindings-ruby.yml +21 -0
  20. data/ext/sources/CMakeGraphVizOptions.cmake +8 -0
  21. data/ext/sources/CMakeLists.txt +251 -0
  22. data/ext/sources/bindings/javascript/CMakeLists.txt +41 -0
  23. data/ext/sources/bindings/javascript/emscripten.cpp +93 -0
  24. data/ext/sources/bindings/javascript/libwhisper.worker.js +1 -0
  25. data/ext/sources/bindings/javascript/package-tmpl.json +26 -0
  26. data/ext/sources/bindings/javascript/package.json +26 -0
  27. data/ext/sources/bindings/javascript/whisper.js +19 -0
  28. data/ext/sources/build-xcframework.sh +547 -0
  29. data/ext/sources/ci/run.sh +336 -0
  30. data/ext/sources/close-issue.yml +28 -0
  31. data/ext/sources/cmake/DefaultTargetOptions.cmake +16 -0
  32. data/ext/sources/cmake/FindFFmpeg.cmake +163 -0
  33. data/ext/sources/cmake/build-info.cmake +60 -0
  34. data/ext/sources/cmake/git-vars.cmake +22 -0
  35. data/ext/sources/cmake/whisper-config.cmake.in +65 -0
  36. data/ext/sources/cmake/whisper.pc.in +10 -0
  37. data/ext/sources/examples/CMakeLists.txt +124 -0
  38. data/ext/sources/examples/addon.node/CMakeLists.txt +31 -0
  39. data/ext/sources/examples/addon.node/__test__/whisper.spec.js +37 -0
  40. data/ext/sources/examples/addon.node/addon.cpp +438 -0
  41. data/ext/sources/examples/addon.node/index.js +54 -0
  42. data/ext/sources/examples/addon.node/package.json +16 -0
  43. data/ext/sources/examples/bench/CMakeLists.txt +8 -0
  44. data/ext/sources/examples/bench/bench.cpp +175 -0
  45. data/ext/sources/examples/bench.wasm/CMakeLists.txt +49 -0
  46. data/ext/sources/examples/bench.wasm/emscripten.cpp +87 -0
  47. data/ext/sources/examples/bench.wasm/index-tmpl.html +284 -0
  48. data/ext/sources/examples/cli/CMakeLists.txt +8 -0
  49. data/ext/sources/examples/cli/cli.cpp +1294 -0
  50. data/ext/sources/examples/coi-serviceworker.js +146 -0
  51. data/ext/sources/examples/command/CMakeLists.txt +10 -0
  52. data/ext/sources/examples/command/command.cpp +776 -0
  53. data/ext/sources/examples/command/commands.txt +9 -0
  54. data/ext/sources/examples/command.wasm/CMakeLists.txt +50 -0
  55. data/ext/sources/examples/command.wasm/emscripten.cpp +327 -0
  56. data/ext/sources/examples/command.wasm/index-tmpl.html +414 -0
  57. data/ext/sources/examples/common-ggml.cpp +238 -0
  58. data/ext/sources/examples/common-ggml.h +18 -0
  59. data/ext/sources/examples/common-sdl.cpp +227 -0
  60. data/ext/sources/examples/common-sdl.h +49 -0
  61. data/ext/sources/examples/common-whisper.cpp +168 -0
  62. data/ext/sources/examples/common-whisper.h +24 -0
  63. data/ext/sources/examples/common.cpp +675 -0
  64. data/ext/sources/examples/common.h +322 -0
  65. data/ext/sources/examples/deprecation-warning/CMakeLists.txt +6 -0
  66. data/ext/sources/examples/deprecation-warning/deprecation-warning.cpp +38 -0
  67. data/ext/sources/examples/ffmpeg-transcode.cpp +368 -0
  68. data/ext/sources/examples/generate-karaoke.sh +57 -0
  69. data/ext/sources/examples/grammar-parser.cpp +423 -0
  70. data/ext/sources/examples/grammar-parser.h +29 -0
  71. data/ext/sources/examples/helpers.js +191 -0
  72. data/ext/sources/examples/json.hpp +24596 -0
  73. data/ext/sources/examples/livestream.sh +112 -0
  74. data/ext/sources/examples/lsp/CMakeLists.txt +9 -0
  75. data/ext/sources/examples/lsp/lsp.cpp +467 -0
  76. data/ext/sources/examples/lsp/whisper.vim +362 -0
  77. data/ext/sources/examples/miniaudio.h +93468 -0
  78. data/ext/sources/examples/python/test_whisper_processor.py +7 -0
  79. data/ext/sources/examples/python/whisper_processor.py +54 -0
  80. data/ext/sources/examples/quantize/CMakeLists.txt +6 -0
  81. data/ext/sources/examples/quantize/quantize.cpp +223 -0
  82. data/ext/sources/examples/server/CMakeLists.txt +12 -0
  83. data/ext/sources/examples/server/bench.js +29 -0
  84. data/ext/sources/examples/server/httplib.h +10497 -0
  85. data/ext/sources/examples/server/server.cpp +1091 -0
  86. data/ext/sources/examples/server.py +115 -0
  87. data/ext/sources/examples/stb_vorbis.c +5584 -0
  88. data/ext/sources/examples/stream/CMakeLists.txt +10 -0
  89. data/ext/sources/examples/stream/stream.cpp +429 -0
  90. data/ext/sources/examples/stream.wasm/CMakeLists.txt +49 -0
  91. data/ext/sources/examples/stream.wasm/emscripten.cpp +216 -0
  92. data/ext/sources/examples/stream.wasm/index-tmpl.html +414 -0
  93. data/ext/sources/examples/sycl/CMakeLists.txt +9 -0
  94. data/ext/sources/examples/sycl/build.sh +22 -0
  95. data/ext/sources/examples/sycl/ls-sycl-device.cpp +11 -0
  96. data/ext/sources/examples/sycl/run-whisper.sh +17 -0
  97. data/ext/sources/examples/talk-llama/CMakeLists.txt +40 -0
  98. data/ext/sources/examples/talk-llama/eleven-labs.py +80 -0
  99. data/ext/sources/examples/talk-llama/llama-adapter.cpp +388 -0
  100. data/ext/sources/examples/talk-llama/llama-adapter.h +76 -0
  101. data/ext/sources/examples/talk-llama/llama-arch.cpp +1746 -0
  102. data/ext/sources/examples/talk-llama/llama-arch.h +437 -0
  103. data/ext/sources/examples/talk-llama/llama-batch.cpp +374 -0
  104. data/ext/sources/examples/talk-llama/llama-batch.h +89 -0
  105. data/ext/sources/examples/talk-llama/llama-chat.cpp +663 -0
  106. data/ext/sources/examples/talk-llama/llama-chat.h +58 -0
  107. data/ext/sources/examples/talk-llama/llama-context.cpp +2676 -0
  108. data/ext/sources/examples/talk-llama/llama-context.h +276 -0
  109. data/ext/sources/examples/talk-llama/llama-cparams.cpp +5 -0
  110. data/ext/sources/examples/talk-llama/llama-cparams.h +41 -0
  111. data/ext/sources/examples/talk-llama/llama-grammar.cpp +1229 -0
  112. data/ext/sources/examples/talk-llama/llama-grammar.h +173 -0
  113. data/ext/sources/examples/talk-llama/llama-graph.cpp +1618 -0
  114. data/ext/sources/examples/talk-llama/llama-graph.h +640 -0
  115. data/ext/sources/examples/talk-llama/llama-hparams.cpp +95 -0
  116. data/ext/sources/examples/talk-llama/llama-hparams.h +190 -0
  117. data/ext/sources/examples/talk-llama/llama-impl.cpp +167 -0
  118. data/ext/sources/examples/talk-llama/llama-impl.h +61 -0
  119. data/ext/sources/examples/talk-llama/llama-io.cpp +15 -0
  120. data/ext/sources/examples/talk-llama/llama-io.h +35 -0
  121. data/ext/sources/examples/talk-llama/llama-kv-cache.cpp +2739 -0
  122. data/ext/sources/examples/talk-llama/llama-kv-cache.h +502 -0
  123. data/ext/sources/examples/talk-llama/llama-kv-cells.h +379 -0
  124. data/ext/sources/examples/talk-llama/llama-memory.cpp +1 -0
  125. data/ext/sources/examples/talk-llama/llama-memory.h +32 -0
  126. data/ext/sources/examples/talk-llama/llama-mmap.cpp +600 -0
  127. data/ext/sources/examples/talk-llama/llama-mmap.h +68 -0
  128. data/ext/sources/examples/talk-llama/llama-model-loader.cpp +1138 -0
  129. data/ext/sources/examples/talk-llama/llama-model-loader.h +169 -0
  130. data/ext/sources/examples/talk-llama/llama-model-saver.cpp +281 -0
  131. data/ext/sources/examples/talk-llama/llama-model-saver.h +37 -0
  132. data/ext/sources/examples/talk-llama/llama-model.cpp +13814 -0
  133. data/ext/sources/examples/talk-llama/llama-model.h +425 -0
  134. data/ext/sources/examples/talk-llama/llama-quant.cpp +966 -0
  135. data/ext/sources/examples/talk-llama/llama-quant.h +1 -0
  136. data/ext/sources/examples/talk-llama/llama-sampling.cpp +2575 -0
  137. data/ext/sources/examples/talk-llama/llama-sampling.h +32 -0
  138. data/ext/sources/examples/talk-llama/llama-vocab.cpp +3340 -0
  139. data/ext/sources/examples/talk-llama/llama-vocab.h +131 -0
  140. data/ext/sources/examples/talk-llama/llama.cpp +354 -0
  141. data/ext/sources/examples/talk-llama/llama.h +1377 -0
  142. data/ext/sources/examples/talk-llama/prompts/talk-alpaca.txt +23 -0
  143. data/ext/sources/examples/talk-llama/speak +40 -0
  144. data/ext/sources/examples/talk-llama/speak.bat +1 -0
  145. data/ext/sources/examples/talk-llama/speak.ps1 +14 -0
  146. data/ext/sources/examples/talk-llama/talk-llama.cpp +808 -0
  147. data/ext/sources/examples/talk-llama/unicode-data.cpp +7034 -0
  148. data/ext/sources/examples/talk-llama/unicode-data.h +20 -0
  149. data/ext/sources/examples/talk-llama/unicode.cpp +849 -0
  150. data/ext/sources/examples/talk-llama/unicode.h +66 -0
  151. data/ext/sources/examples/vad-speech-segments/CMakeLists.txt +8 -0
  152. data/ext/sources/examples/vad-speech-segments/speech.cpp +143 -0
  153. data/ext/sources/examples/wchess/CMakeLists.txt +10 -0
  154. data/ext/sources/examples/wchess/libwchess/CMakeLists.txt +19 -0
  155. data/ext/sources/examples/wchess/libwchess/Chessboard.cpp +803 -0
  156. data/ext/sources/examples/wchess/libwchess/Chessboard.h +33 -0
  157. data/ext/sources/examples/wchess/libwchess/WChess.cpp +193 -0
  158. data/ext/sources/examples/wchess/libwchess/WChess.h +63 -0
  159. data/ext/sources/examples/wchess/libwchess/test-chessboard.cpp +117 -0
  160. data/ext/sources/examples/wchess/wchess.cmd/CMakeLists.txt +8 -0
  161. data/ext/sources/examples/wchess/wchess.cmd/wchess.cmd.cpp +249 -0
  162. data/ext/sources/examples/whisper.wasm/CMakeLists.txt +50 -0
  163. data/ext/sources/examples/whisper.wasm/emscripten.cpp +118 -0
  164. data/ext/sources/examples/whisper.wasm/index-tmpl.html +658 -0
  165. data/ext/sources/ggml/CMakeLists.txt +390 -0
  166. data/ext/sources/ggml/cmake/BuildTypes.cmake +54 -0
  167. data/ext/sources/ggml/cmake/GitVars.cmake +22 -0
  168. data/ext/sources/ggml/cmake/common.cmake +26 -0
  169. data/ext/sources/ggml/cmake/ggml-config.cmake.in +152 -0
  170. data/ext/{ggml → sources/ggml}/include/ggml-alloc.h +1 -1
  171. data/ext/{ggml → sources/ggml}/include/ggml-backend.h +9 -7
  172. data/ext/{ggml → sources/ggml}/include/ggml-cpp.h +2 -1
  173. data/ext/{ggml → sources/ggml}/include/ggml-cpu.h +9 -1
  174. data/ext/{ggml → sources/ggml}/include/ggml-metal.h +1 -1
  175. data/ext/{ggml → sources/ggml}/include/ggml-opt.h +49 -28
  176. data/ext/{ggml → sources/ggml}/include/ggml-rpc.h +6 -1
  177. data/ext/{ggml → sources/ggml}/include/ggml-vulkan.h +0 -2
  178. data/ext/{ggml → sources/ggml}/include/ggml.h +182 -265
  179. data/ext/sources/ggml/include/gguf.h +202 -0
  180. data/ext/sources/ggml/src/CMakeLists.txt +346 -0
  181. data/ext/{ggml → sources/ggml}/src/ggml-alloc.c +34 -29
  182. data/ext/sources/ggml/src/ggml-amx/CMakeLists.txt +107 -0
  183. data/ext/{ggml → sources/ggml}/src/ggml-backend-impl.h +1 -2
  184. data/ext/{ggml → sources/ggml}/src/ggml-backend-reg.cpp +87 -53
  185. data/ext/{ggml → sources/ggml}/src/ggml-backend.cpp +26 -14
  186. data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +87 -0
  187. data/ext/sources/ggml/src/ggml-cann/CMakeLists.txt +74 -0
  188. data/ext/sources/ggml/src/ggml-cann/Doxyfile +2579 -0
  189. data/ext/{ggml → sources/ggml}/src/ggml-cann/acl_tensor.cpp +10 -4
  190. data/ext/{ggml → sources/ggml}/src/ggml-cann/acl_tensor.h +5 -5
  191. data/ext/{ggml → sources/ggml}/src/ggml-cann/aclnn_ops.cpp +1272 -1506
  192. data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +1125 -0
  193. data/ext/{ggml → sources/ggml}/src/ggml-cann/common.h +135 -1
  194. data/ext/{ggml → sources/ggml}/src/ggml-cann/ggml-cann.cpp +564 -146
  195. data/ext/sources/ggml/src/ggml-cann/kernels/CMakeLists.txt +30 -0
  196. data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/dup.cpp +3 -5
  197. data/ext/{ggml → sources/ggml}/src/ggml-common.h +12 -8
  198. data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +504 -0
  199. data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/amx.cpp +2 -1
  200. data/ext/sources/ggml/src/ggml-cpu/binary-ops.cpp +158 -0
  201. data/ext/sources/ggml/src/ggml-cpu/binary-ops.h +16 -0
  202. data/ext/sources/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +100 -0
  203. data/ext/sources/ggml/src/ggml-cpu/common.h +72 -0
  204. data/ext/{ggml → sources/ggml}/src/ggml-cpu/cpu-feats-x86.cpp +5 -1
  205. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +6431 -0
  206. data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-impl.h +163 -41
  207. data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-quants.c +4029 -1117
  208. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +3510 -0
  209. data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu.cpp +67 -18
  210. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +337 -0
  211. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.h +95 -0
  212. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +482 -0
  213. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.h +17 -0
  214. data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +3544 -0
  215. data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.h +14 -0
  216. data/ext/sources/ggml/src/ggml-cpu/ops.cpp +8903 -0
  217. data/ext/sources/ggml/src/ggml-cpu/ops.h +110 -0
  218. data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +892 -0
  219. data/ext/sources/ggml/src/ggml-cpu/unary-ops.cpp +186 -0
  220. data/ext/sources/ggml/src/ggml-cpu/unary-ops.h +28 -0
  221. data/ext/sources/ggml/src/ggml-cpu/vec.cpp +252 -0
  222. data/ext/sources/ggml/src/ggml-cpu/vec.h +818 -0
  223. data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +184 -0
  224. data/ext/sources/ggml/src/ggml-cuda/acc.cu +61 -0
  225. data/ext/sources/ggml/src/ggml-cuda/acc.cuh +5 -0
  226. data/ext/sources/ggml/src/ggml-cuda/arange.cu +34 -0
  227. data/ext/sources/ggml/src/ggml-cuda/arange.cuh +5 -0
  228. data/ext/sources/ggml/src/ggml-cuda/argmax.cu +91 -0
  229. data/ext/sources/ggml/src/ggml-cuda/argmax.cuh +3 -0
  230. data/ext/sources/ggml/src/ggml-cuda/argsort.cu +104 -0
  231. data/ext/sources/ggml/src/ggml-cuda/argsort.cuh +3 -0
  232. data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +363 -0
  233. data/ext/sources/ggml/src/ggml-cuda/binbcast.cuh +9 -0
  234. data/ext/sources/ggml/src/ggml-cuda/clamp.cu +45 -0
  235. data/ext/sources/ggml/src/ggml-cuda/clamp.cuh +5 -0
  236. data/ext/sources/ggml/src/ggml-cuda/common.cuh +828 -0
  237. data/ext/sources/ggml/src/ggml-cuda/concat.cu +221 -0
  238. data/ext/sources/ggml/src/ggml-cuda/concat.cuh +5 -0
  239. data/ext/sources/ggml/src/ggml-cuda/conv-transpose-1d.cu +89 -0
  240. data/ext/sources/ggml/src/ggml-cuda/conv-transpose-1d.cuh +5 -0
  241. data/ext/sources/ggml/src/ggml-cuda/convert.cu +730 -0
  242. data/ext/sources/ggml/src/ggml-cuda/convert.cuh +26 -0
  243. data/ext/sources/ggml/src/ggml-cuda/count-equal.cu +64 -0
  244. data/ext/sources/ggml/src/ggml-cuda/count-equal.cuh +5 -0
  245. data/ext/sources/ggml/src/ggml-cuda/cp-async.cuh +57 -0
  246. data/ext/sources/ggml/src/ggml-cuda/cpy.cu +705 -0
  247. data/ext/sources/ggml/src/ggml-cuda/cpy.cuh +11 -0
  248. data/ext/sources/ggml/src/ggml-cuda/cross-entropy-loss.cu +189 -0
  249. data/ext/sources/ggml/src/ggml-cuda/cross-entropy-loss.cuh +7 -0
  250. data/ext/sources/ggml/src/ggml-cuda/dequantize.cuh +103 -0
  251. data/ext/sources/ggml/src/ggml-cuda/diagmask.cu +40 -0
  252. data/ext/sources/ggml/src/ggml-cuda/diagmask.cuh +5 -0
  253. data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +881 -0
  254. data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +1471 -0
  255. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cu +357 -0
  256. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cuh +3 -0
  257. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cu +365 -0
  258. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cuh +3 -0
  259. data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f16.cuh +482 -0
  260. data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f32.cuh +472 -0
  261. data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +634 -0
  262. data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +3 -0
  263. data/ext/sources/ggml/src/ggml-cuda/fattn.cu +346 -0
  264. data/ext/sources/ggml/src/ggml-cuda/fattn.cuh +3 -0
  265. data/ext/sources/ggml/src/ggml-cuda/getrows.cu +275 -0
  266. data/ext/sources/ggml/src/ggml-cuda/getrows.cuh +15 -0
  267. data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +3505 -0
  268. data/ext/sources/ggml/src/ggml-cuda/gla.cu +93 -0
  269. data/ext/sources/ggml/src/ggml-cuda/gla.cuh +3 -0
  270. data/ext/sources/ggml/src/ggml-cuda/im2col.cu +103 -0
  271. data/ext/sources/ggml/src/ggml-cuda/im2col.cuh +5 -0
  272. data/ext/sources/ggml/src/ggml-cuda/mma.cuh +396 -0
  273. data/ext/sources/ggml/src/ggml-cuda/mmq.cu +324 -0
  274. data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +3217 -0
  275. data/ext/sources/ggml/src/ggml-cuda/mmv.cu +336 -0
  276. data/ext/sources/ggml/src/ggml-cuda/mmv.cuh +12 -0
  277. data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +595 -0
  278. data/ext/sources/ggml/src/ggml-cuda/mmvq.cuh +12 -0
  279. data/ext/sources/ggml/src/ggml-cuda/norm.cu +458 -0
  280. data/ext/sources/ggml/src/ggml-cuda/norm.cuh +11 -0
  281. data/ext/sources/ggml/src/ggml-cuda/opt-step-adamw.cu +78 -0
  282. data/ext/sources/ggml/src/ggml-cuda/opt-step-adamw.cuh +5 -0
  283. data/ext/sources/ggml/src/ggml-cuda/out-prod.cu +68 -0
  284. data/ext/sources/ggml/src/ggml-cuda/out-prod.cuh +3 -0
  285. data/ext/sources/ggml/src/ggml-cuda/pad.cu +49 -0
  286. data/ext/sources/ggml/src/ggml-cuda/pad.cuh +5 -0
  287. data/ext/sources/ggml/src/ggml-cuda/pool2d.cu +94 -0
  288. data/ext/sources/ggml/src/ggml-cuda/pool2d.cuh +5 -0
  289. data/ext/sources/ggml/src/ggml-cuda/quantize.cu +190 -0
  290. data/ext/sources/ggml/src/ggml-cuda/quantize.cuh +27 -0
  291. data/ext/sources/ggml/src/ggml-cuda/rope.cu +456 -0
  292. data/ext/sources/ggml/src/ggml-cuda/rope.cuh +7 -0
  293. data/ext/sources/ggml/src/ggml-cuda/scale.cu +31 -0
  294. data/ext/sources/ggml/src/ggml-cuda/scale.cuh +5 -0
  295. data/ext/sources/ggml/src/ggml-cuda/softmax.cu +283 -0
  296. data/ext/sources/ggml/src/ggml-cuda/softmax.cuh +7 -0
  297. data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +148 -0
  298. data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cuh +3 -0
  299. data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cu +153 -0
  300. data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cuh +3 -0
  301. data/ext/sources/ggml/src/ggml-cuda/sum.cu +45 -0
  302. data/ext/sources/ggml/src/ggml-cuda/sum.cuh +5 -0
  303. data/ext/sources/ggml/src/ggml-cuda/sumrows.cu +39 -0
  304. data/ext/sources/ggml/src/ggml-cuda/sumrows.cuh +5 -0
  305. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_16.cu +5 -0
  306. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_8.cu +10 -0
  307. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_1.cu +10 -0
  308. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_2.cu +10 -0
  309. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_4.cu +10 -0
  310. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_16.cu +5 -0
  311. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_4.cu +10 -0
  312. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_8.cu +10 -0
  313. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_1.cu +10 -0
  314. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_2.cu +10 -0
  315. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_16.cu +5 -0
  316. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_2.cu +10 -0
  317. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_4.cu +10 -0
  318. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_8.cu +10 -0
  319. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_64-ncols2_1.cu +10 -0
  320. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_1.cu +10 -0
  321. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_2.cu +10 -0
  322. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_4.cu +10 -0
  323. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_8.cu +10 -0
  324. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu +5 -0
  325. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu +5 -0
  326. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu +5 -0
  327. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu +5 -0
  328. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu +5 -0
  329. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu +5 -0
  330. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu +5 -0
  331. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu +5 -0
  332. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu +5 -0
  333. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu +5 -0
  334. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu +5 -0
  335. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu +5 -0
  336. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu +5 -0
  337. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu +5 -0
  338. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu +5 -0
  339. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu +5 -0
  340. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu +5 -0
  341. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu +5 -0
  342. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu +5 -0
  343. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu +5 -0
  344. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu +5 -0
  345. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu +5 -0
  346. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu +5 -0
  347. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu +5 -0
  348. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu +5 -0
  349. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu +5 -0
  350. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu +5 -0
  351. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu +5 -0
  352. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu +5 -0
  353. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu +5 -0
  354. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu +5 -0
  355. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu +5 -0
  356. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu +5 -0
  357. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu +5 -0
  358. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu +5 -0
  359. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu +5 -0
  360. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu +5 -0
  361. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu +5 -0
  362. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu +5 -0
  363. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu +5 -0
  364. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu +5 -0
  365. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu +5 -0
  366. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu +5 -0
  367. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu +5 -0
  368. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu +5 -0
  369. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu +5 -0
  370. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu +5 -0
  371. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu +5 -0
  372. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu +5 -0
  373. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu +5 -0
  374. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu +5 -0
  375. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu +5 -0
  376. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu +5 -0
  377. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu +5 -0
  378. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu +5 -0
  379. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu +5 -0
  380. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu +5 -0
  381. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu +5 -0
  382. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu +5 -0
  383. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu +5 -0
  384. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu +5 -0
  385. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu +5 -0
  386. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu +5 -0
  387. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu +5 -0
  388. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu +5 -0
  389. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu +5 -0
  390. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu +5 -0
  391. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu +5 -0
  392. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu +5 -0
  393. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu +5 -0
  394. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu +5 -0
  395. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu +5 -0
  396. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu +5 -0
  397. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu +5 -0
  398. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu +5 -0
  399. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu +5 -0
  400. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu +5 -0
  401. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu +5 -0
  402. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu +5 -0
  403. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu +5 -0
  404. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu +5 -0
  405. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu +5 -0
  406. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu +5 -0
  407. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu +5 -0
  408. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu +5 -0
  409. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu +5 -0
  410. data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +78 -0
  411. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_s.cu +5 -0
  412. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_s.cu +5 -0
  413. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu +5 -0
  414. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu +5 -0
  415. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_s.cu +5 -0
  416. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu +5 -0
  417. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu +5 -0
  418. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu +5 -0
  419. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q2_k.cu +5 -0
  420. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q3_k.cu +5 -0
  421. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_0.cu +5 -0
  422. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_1.cu +5 -0
  423. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_k.cu +5 -0
  424. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_0.cu +5 -0
  425. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_1.cu +5 -0
  426. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_k.cu +5 -0
  427. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q6_k.cu +5 -0
  428. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q8_0.cu +5 -0
  429. data/ext/sources/ggml/src/ggml-cuda/tsembd.cu +47 -0
  430. data/ext/sources/ggml/src/ggml-cuda/tsembd.cuh +5 -0
  431. data/ext/sources/ggml/src/ggml-cuda/unary.cu +289 -0
  432. data/ext/sources/ggml/src/ggml-cuda/unary.cuh +59 -0
  433. data/ext/sources/ggml/src/ggml-cuda/upscale.cu +51 -0
  434. data/ext/sources/ggml/src/ggml-cuda/upscale.cuh +5 -0
  435. data/ext/sources/ggml/src/ggml-cuda/vecdotq.cuh +1135 -0
  436. data/ext/{ggml → sources/ggml}/src/ggml-cuda/vendors/cuda.h +1 -0
  437. data/ext/{ggml → sources/ggml}/src/ggml-cuda/vendors/hip.h +57 -0
  438. data/ext/{ggml → sources/ggml}/src/ggml-cuda/vendors/musa.h +7 -1
  439. data/ext/sources/ggml/src/ggml-cuda/wkv.cu +199 -0
  440. data/ext/sources/ggml/src/ggml-cuda/wkv.cuh +7 -0
  441. data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +131 -0
  442. data/ext/{ggml → sources/ggml}/src/ggml-impl.h +64 -19
  443. data/ext/sources/ggml/src/ggml-kompute/CMakeLists.txt +166 -0
  444. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/common.comp +112 -0
  445. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_add.comp +58 -0
  446. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_addrow.comp +25 -0
  447. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f16.comp +52 -0
  448. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f32.comp +52 -0
  449. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f16.comp +52 -0
  450. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f32.comp +52 -0
  451. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_diagmask.comp +30 -0
  452. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_gelu.comp +22 -0
  453. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows.comp +17 -0
  454. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f16.comp +31 -0
  455. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f32.comp +31 -0
  456. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_0.comp +38 -0
  457. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_1.comp +39 -0
  458. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q6_k.comp +44 -0
  459. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul.comp +52 -0
  460. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp +69 -0
  461. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_mat_f32.comp +51 -0
  462. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_0.comp +33 -0
  463. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_1.comp +35 -0
  464. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp +140 -0
  465. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp +106 -0
  466. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q8_0.comp +73 -0
  467. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp +52 -0
  468. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp +28 -0
  469. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_norm.comp +84 -0
  470. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_relu.comp +21 -0
  471. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rmsnorm.comp +53 -0
  472. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f16.comp +52 -0
  473. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f32.comp +52 -0
  474. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f16.comp +52 -0
  475. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f32.comp +52 -0
  476. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale.comp +19 -0
  477. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale_8.comp +23 -0
  478. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_silu.comp +22 -0
  479. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp +72 -0
  480. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp +71 -0
  481. data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +120 -0
  482. data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +622 -0
  483. data/ext/{ggml → sources/ggml}/src/ggml-metal/ggml-metal.m +2178 -1064
  484. data/ext/{ggml → sources/ggml}/src/ggml-metal/ggml-metal.metal +1575 -1218
  485. data/ext/sources/ggml/src/ggml-musa/CMakeLists.txt +113 -0
  486. data/ext/sources/ggml/src/ggml-musa/mudnn.cu +112 -0
  487. data/ext/sources/ggml/src/ggml-musa/mudnn.cuh +12 -0
  488. data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +96 -0
  489. data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +5124 -0
  490. data/ext/sources/ggml/src/ggml-opencl/kernels/add.cl +83 -0
  491. data/ext/sources/ggml/src/ggml-opencl/kernels/clamp.cl +20 -0
  492. data/ext/sources/ggml/src/ggml-opencl/kernels/cpy.cl +184 -0
  493. data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +118 -0
  494. data/ext/sources/ggml/src/ggml-opencl/kernels/diag_mask_inf.cl +58 -0
  495. data/ext/sources/ggml/src/ggml-opencl/kernels/embed_kernel.py +26 -0
  496. data/ext/sources/ggml/src/ggml-opencl/kernels/gelu.cl +62 -0
  497. data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle.cl +268 -0
  498. data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_general.cl +274 -0
  499. data/ext/sources/ggml/src/ggml-opencl/kernels/get_rows.cl +163 -0
  500. data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f16.cl +57 -0
  501. data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f32.cl +57 -0
  502. data/ext/sources/ggml/src/ggml-opencl/kernels/mul.cl +79 -0
  503. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mat_Ab_Bi_8x4.cl +139 -0
  504. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f16.cl +118 -0
  505. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32.cl +118 -0
  506. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_1row.cl +94 -0
  507. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_l4.cl +84 -0
  508. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f32_f32.cl +118 -0
  509. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32.cl +192 -0
  510. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_16x_flat.cl +307 -0
  511. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_8x_flat.cl +265 -0
  512. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_8x_flat.cl +272 -0
  513. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_v.cl +254 -0
  514. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q6_k.cl +190 -0
  515. data/ext/sources/ggml/src/ggml-opencl/kernels/norm.cl +81 -0
  516. data/ext/sources/ggml/src/ggml-opencl/kernels/relu.cl +16 -0
  517. data/ext/sources/ggml/src/ggml-opencl/kernels/rms_norm.cl +96 -0
  518. data/ext/sources/ggml/src/ggml-opencl/kernels/rope.cl +721 -0
  519. data/ext/sources/ggml/src/ggml-opencl/kernels/scale.cl +16 -0
  520. data/ext/sources/ggml/src/ggml-opencl/kernels/silu.cl +30 -0
  521. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f16.cl +87 -0
  522. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f32.cl +87 -0
  523. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f16.cl +86 -0
  524. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f32.cl +86 -0
  525. data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +84 -0
  526. data/ext/{ggml → sources/ggml}/src/ggml-opt.cpp +373 -190
  527. data/ext/{ggml → sources/ggml}/src/ggml-quants.c +114 -120
  528. data/ext/sources/ggml/src/ggml-rpc/CMakeLists.txt +9 -0
  529. data/ext/{ggml → sources/ggml}/src/ggml-rpc/ggml-rpc.cpp +480 -73
  530. data/ext/sources/ggml/src/ggml-sycl/CMakeLists.txt +189 -0
  531. data/ext/sources/ggml/src/ggml-sycl/backend.hpp +37 -0
  532. data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +345 -0
  533. data/ext/sources/ggml/src/ggml-sycl/binbcast.hpp +39 -0
  534. data/ext/{ggml → sources/ggml}/src/ggml-sycl/common.cpp +20 -32
  535. data/ext/sources/ggml/src/ggml-sycl/common.hpp +589 -0
  536. data/ext/{ggml → sources/ggml}/src/ggml-sycl/concat.cpp +32 -33
  537. data/ext/sources/ggml/src/ggml-sycl/concat.hpp +20 -0
  538. data/ext/{ggml → sources/ggml}/src/ggml-sycl/conv.cpp +4 -2
  539. data/ext/sources/ggml/src/ggml-sycl/conv.hpp +20 -0
  540. data/ext/{ggml → sources/ggml}/src/ggml-sycl/convert.cpp +104 -28
  541. data/ext/sources/ggml/src/ggml-sycl/convert.hpp +34 -0
  542. data/ext/sources/ggml/src/ggml-sycl/cpy.cpp +700 -0
  543. data/ext/sources/ggml/src/ggml-sycl/cpy.hpp +11 -0
  544. data/ext/sources/ggml/src/ggml-sycl/dequantize.hpp +791 -0
  545. data/ext/{ggml → sources/ggml}/src/ggml-sycl/dmmv.cpp +156 -17
  546. data/ext/sources/ggml/src/ggml-sycl/dmmv.hpp +27 -0
  547. data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +2957 -0
  548. data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +1511 -0
  549. data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +75 -0
  550. data/ext/sources/ggml/src/ggml-sycl/gemm.hpp +99 -0
  551. data/ext/sources/ggml/src/ggml-sycl/getrows.cpp +309 -0
  552. data/ext/sources/ggml/src/ggml-sycl/getrows.hpp +20 -0
  553. data/ext/{ggml → sources/ggml}/src/ggml-sycl/ggml-sycl.cpp +1004 -1240
  554. data/ext/sources/ggml/src/ggml-sycl/gla.cpp +106 -0
  555. data/ext/sources/ggml/src/ggml-sycl/gla.hpp +8 -0
  556. data/ext/sources/ggml/src/ggml-sycl/im2col.cpp +136 -0
  557. data/ext/sources/ggml/src/ggml-sycl/im2col.hpp +21 -0
  558. data/ext/{ggml → sources/ggml}/src/ggml-sycl/mmq.cpp +0 -1
  559. data/ext/sources/ggml/src/ggml-sycl/mmq.hpp +33 -0
  560. data/ext/{ggml → sources/ggml}/src/ggml-sycl/mmvq.cpp +261 -166
  561. data/ext/sources/ggml/src/ggml-sycl/mmvq.hpp +27 -0
  562. data/ext/{ggml → sources/ggml}/src/ggml-sycl/norm.cpp +204 -81
  563. data/ext/sources/ggml/src/ggml-sycl/norm.hpp +26 -0
  564. data/ext/{ggml → sources/ggml}/src/ggml-sycl/outprod.cpp +8 -17
  565. data/ext/sources/ggml/src/ggml-sycl/outprod.hpp +10 -0
  566. data/ext/sources/ggml/src/ggml-sycl/presets.hpp +74 -0
  567. data/ext/sources/ggml/src/ggml-sycl/quants.hpp +83 -0
  568. data/ext/sources/ggml/src/ggml-sycl/rope.cpp +361 -0
  569. data/ext/sources/ggml/src/ggml-sycl/rope.hpp +20 -0
  570. data/ext/{ggml → sources/ggml}/src/ggml-sycl/softmax.cpp +35 -25
  571. data/ext/sources/ggml/src/ggml-sycl/softmax.hpp +20 -0
  572. data/ext/sources/ggml/src/ggml-sycl/sycl_hw.cpp +13 -0
  573. data/ext/sources/ggml/src/ggml-sycl/sycl_hw.hpp +23 -0
  574. data/ext/{ggml → sources/ggml}/src/ggml-sycl/tsembd.cpp +3 -3
  575. data/ext/sources/ggml/src/ggml-sycl/tsembd.hpp +20 -0
  576. data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +1215 -0
  577. data/ext/sources/ggml/src/ggml-sycl/wkv.cpp +293 -0
  578. data/ext/sources/ggml/src/ggml-sycl/wkv.hpp +10 -0
  579. data/ext/sources/ggml/src/ggml-vulkan/CMakeLists.txt +196 -0
  580. data/ext/sources/ggml/src/ggml-vulkan/cmake/host-toolchain.cmake.in +15 -0
  581. data/ext/{ggml → sources/ggml}/src/ggml-vulkan/ggml-vulkan.cpp +3130 -1087
  582. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +39 -0
  583. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +29 -0
  584. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add.comp +29 -0
  585. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argmax.comp +51 -0
  586. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp +69 -0
  587. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp +17 -0
  588. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp +41 -0
  589. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp +49 -0
  590. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp +105 -0
  591. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp +23 -0
  592. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +51 -0
  593. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +242 -0
  594. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp +17 -0
  595. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/count_equal.comp +31 -0
  596. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_f32.comp +20 -0
  597. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp +462 -0
  598. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +699 -0
  599. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_head.comp +13 -0
  600. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_m.comp +42 -0
  601. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_s.comp +35 -0
  602. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp +44 -0
  603. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xs.comp +43 -0
  604. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp +48 -0
  605. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp +39 -0
  606. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp +49 -0
  607. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp +32 -0
  608. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_xs.comp +34 -0
  609. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp +34 -0
  610. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp +42 -0
  611. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_0.comp +30 -0
  612. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_1.comp +32 -0
  613. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +68 -0
  614. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_0.comp +34 -0
  615. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_1.comp +35 -0
  616. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +70 -0
  617. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp +33 -0
  618. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q8_0.comp +31 -0
  619. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/diag_mask_inf.comp +34 -0
  620. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/div.comp +27 -0
  621. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +337 -0
  622. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.comp +162 -0
  623. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +360 -0
  624. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +267 -0
  625. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +59 -0
  626. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu.comp +25 -0
  627. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu_quick.comp +23 -0
  628. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp +64 -0
  629. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_head.comp +9 -0
  630. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp +76 -0
  631. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp +33 -0
  632. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +41 -0
  633. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp +66 -0
  634. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +100 -0
  635. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +41 -0
  636. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/leaky_relu.comp +22 -0
  637. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp +27 -0
  638. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp +48 -0
  639. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +169 -0
  640. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp +118 -0
  641. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_m.comp +82 -0
  642. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp +79 -0
  643. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_s.comp +90 -0
  644. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xs.comp +87 -0
  645. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xxs.comp +87 -0
  646. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_s.comp +90 -0
  647. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_xxs.comp +88 -0
  648. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +118 -0
  649. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp +154 -0
  650. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +130 -0
  651. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +132 -0
  652. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +136 -0
  653. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +167 -0
  654. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +130 -0
  655. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +868 -0
  656. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +441 -0
  657. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +442 -0
  658. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.comp +99 -0
  659. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/norm.comp +44 -0
  660. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_adamw.comp +42 -0
  661. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +28 -0
  662. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pool2d.comp +74 -0
  663. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/quantize_q8_1.comp +77 -0
  664. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/relu.comp +21 -0
  665. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp +26 -0
  666. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat_back.comp +37 -0
  667. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +52 -0
  668. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_back.comp +55 -0
  669. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp +58 -0
  670. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +60 -0
  671. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +43 -0
  672. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +43 -0
  673. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_vision.comp +47 -0
  674. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +24 -0
  675. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sigmoid.comp +20 -0
  676. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu.comp +22 -0
  677. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu_back.comp +26 -0
  678. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp +17 -0
  679. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +173 -0
  680. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_back.comp +50 -0
  681. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/square.comp +17 -0
  682. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sub.comp +29 -0
  683. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp +37 -0
  684. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp +20 -0
  685. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_bfloat16_support.comp +7 -0
  686. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat2_support.comp +7 -0
  687. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp +7 -0
  688. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_integer_dot_support.comp +7 -0
  689. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp +41 -0
  690. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/types.comp +1373 -0
  691. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +36 -0
  692. data/ext/{ggml → sources/ggml}/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +193 -35
  693. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/wkv6.comp +87 -0
  694. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/wkv7.comp +91 -0
  695. data/ext/{ggml → sources/ggml}/src/ggml.c +676 -1820
  696. data/ext/sources/ggml/src/gguf.cpp +1330 -0
  697. data/ext/{include → sources/include}/whisper.h +68 -2
  698. data/ext/sources/src/CMakeLists.txt +143 -0
  699. data/ext/{src → sources/src}/coreml/whisper-decoder-impl.h +27 -15
  700. data/ext/{src → sources/src}/coreml/whisper-decoder-impl.m +35 -10
  701. data/ext/{src → sources/src}/coreml/whisper-encoder-impl.h +21 -9
  702. data/ext/{src → sources/src}/coreml/whisper-encoder-impl.m +28 -3
  703. data/ext/sources/src/coreml/whisper-encoder.mm +73 -0
  704. data/ext/sources/src/whisper-arch.h +197 -0
  705. data/ext/{src → sources/src}/whisper.cpp +1905 -374
  706. data/ext/sources/tests/CMakeLists.txt +105 -0
  707. data/ext/sources/tests/earnings21/eval.mk +58 -0
  708. data/ext/sources/tests/earnings21/eval.py +68 -0
  709. data/ext/sources/tests/earnings21/normalizers/__init__.py +2 -0
  710. data/ext/sources/tests/earnings21/normalizers/basic.py +80 -0
  711. data/ext/sources/tests/earnings21/normalizers/english.json +1741 -0
  712. data/ext/sources/tests/earnings21/normalizers/english.py +550 -0
  713. data/ext/sources/tests/earnings21/requirements.txt +6 -0
  714. data/ext/sources/tests/en-0-ref.txt +1 -0
  715. data/ext/sources/tests/en-1-ref.txt +1 -0
  716. data/ext/sources/tests/en-2-ref.txt +1 -0
  717. data/ext/sources/tests/es-0-ref.txt +1 -0
  718. data/ext/sources/tests/librispeech/eval.mk +39 -0
  719. data/ext/sources/tests/librispeech/eval.py +47 -0
  720. data/ext/sources/tests/librispeech/normalizers/__init__.py +2 -0
  721. data/ext/sources/tests/librispeech/normalizers/basic.py +80 -0
  722. data/ext/sources/tests/librispeech/normalizers/english.json +1741 -0
  723. data/ext/sources/tests/librispeech/normalizers/english.py +550 -0
  724. data/ext/sources/tests/librispeech/requirements.txt +6 -0
  725. data/ext/sources/tests/run-tests.sh +130 -0
  726. data/ext/sources/tests/test-c.c +3 -0
  727. data/ext/sources/tests/test-vad-full.cpp +54 -0
  728. data/ext/sources/tests/test-vad.cpp +83 -0
  729. data/ext/sources/tests/test-whisper.js +58 -0
  730. data/extsources.rb +33 -5
  731. data/lib/whisper/model/uri.rb +149 -128
  732. data/sig/whisper.rbs +480 -0
  733. data/tests/helper.rb +28 -0
  734. data/tests/test_callback.rb +45 -3
  735. data/tests/test_error.rb +2 -2
  736. data/tests/test_model.rb +38 -0
  737. data/tests/test_package.rb +18 -3
  738. data/tests/test_params.rb +145 -8
  739. data/tests/test_segment.rb +10 -19
  740. data/tests/test_vad.rb +19 -0
  741. data/tests/test_vad_params.rb +103 -0
  742. data/tests/test_whisper.rb +37 -37
  743. data/whispercpp.gemspec +5 -4
  744. metadata +766 -111
  745. data/ext/cpu.mk +0 -9
  746. data/ext/examples/dr_wav.h +0 -8815
  747. data/ext/ggml/src/ggml-cann/aclnn_ops.h +0 -592
  748. data/ext/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +0 -4262
  749. data/ext/ggml/src/ggml-cpu/ggml-cpu.c +0 -14123
  750. data/ext/ggml/src/ggml-cpu/llamafile/sgemm.cpp +0 -1884
  751. data/ext/ggml/src/ggml-cpu/llamafile/sgemm.h +0 -14
  752. data/ext/ggml/src/ggml-metal/ggml-metal-impl.h +0 -288
  753. data/ext/ggml/src/ggml-sycl/element_wise.cpp +0 -1030
  754. data/ext/ggml/src/ggml-sycl/im2col.cpp +0 -126
  755. data/ext/ggml/src/ggml-sycl/rope.cpp +0 -276
  756. data/ext/ggml/src/ggml-sycl/wkv6.cpp +0 -141
  757. data/ext/metal-embed.mk +0 -17
  758. data/ext/metal.mk +0 -6
  759. data/ext/ruby_whisper.cpp +0 -1909
  760. data/ext/scripts/get-flags.mk +0 -38
  761. data/lib/whisper.rb +0 -2
  762. /data/ext/{ggml → sources/ggml}/include/ggml-blas.h +0 -0
  763. /data/ext/{ggml → sources/ggml}/include/ggml-cann.h +0 -0
  764. /data/ext/{ggml → sources/ggml}/include/ggml-cuda.h +0 -0
  765. /data/ext/{ggml → sources/ggml}/include/ggml-kompute.h +0 -0
  766. /data/ext/{ggml → sources/ggml}/include/ggml-opencl.h +0 -0
  767. /data/ext/{ggml → sources/ggml}/include/ggml-sycl.h +0 -0
  768. /data/ext/{ggml → sources/ggml}/src/ggml-amx/common.h +0 -0
  769. /data/ext/{ggml → sources/ggml}/src/ggml-amx/ggml-amx.cpp +0 -0
  770. /data/ext/{ggml → sources/ggml}/src/ggml-amx/mmq.cpp +0 -0
  771. /data/ext/{ggml → sources/ggml}/src/ggml-amx/mmq.h +0 -0
  772. /data/ext/{ggml → sources/ggml}/src/ggml-blas/ggml-blas.cpp +0 -0
  773. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/ascendc_kernels.h +0 -0
  774. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_f16.cpp +0 -0
  775. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_f32.cpp +0 -0
  776. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_q4_0.cpp +0 -0
  777. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_q8_0.cpp +0 -0
  778. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +0 -0
  779. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +0 -0
  780. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +0 -0
  781. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/amx.h +0 -0
  782. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/common.h +0 -0
  783. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/mmq.cpp +0 -0
  784. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/mmq.h +0 -0
  785. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-aarch64.h +0 -0
  786. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-hbm.cpp +0 -0
  787. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-hbm.h +0 -0
  788. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-quants.h +0 -0
  789. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-traits.cpp +0 -0
  790. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-traits.h +0 -0
  791. /data/ext/{ggml → sources/ggml}/src/ggml-kompute/ggml-kompute.cpp +0 -0
  792. /data/ext/{ggml → sources/ggml}/src/ggml-quants.h +0 -0
  793. /data/ext/{ggml → sources/ggml}/src/ggml-threading.cpp +0 -0
  794. /data/ext/{ggml → sources/ggml}/src/ggml-threading.h +0 -0
  795. /data/ext/{src → sources/src}/coreml/whisper-encoder.h +0 -0
  796. /data/ext/{src → sources/src}/openvino/whisper-openvino-encoder.cpp +0 -0
  797. /data/ext/{src → sources/src}/openvino/whisper-openvino-encoder.h +0 -0
@@ -0,0 +1,2739 @@
1
+ #include "llama-kv-cache.h"
2
+
3
+ #include "llama-impl.h"
4
+ #include "llama-batch.h"
5
+ #include "llama-cparams.h"
6
+ #include "llama-model.h"
7
+ #include "llama-context.h"
8
+
9
+ #include <algorithm>
10
+ #include <cassert>
11
+ #include <cmath>
12
+ #include <limits>
13
+ #include <map>
14
+ #include <stdexcept>
15
+
16
+ //
17
+ // llama_kv_cache_unified
18
+ //
19
+
20
+ uint32_t llama_kv_cache_unified::get_padding(const llama_cparams & cparams) {
21
+ // the FA kernels require padding to avoid extra runtime boundary checks
22
+ return cparams.flash_attn ? 256u : 32u;
23
+ }
24
+
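An illustrative, standalone sketch (not part of the package sources) of the padding rule above: together with the constructor's GGML_ASSERT(kv_size % n_pad == 0) below, it means the cache size must be a multiple of 256 when flash attention is enabled and a multiple of 32 otherwise. The helper name kv_pad is made up for the example.

#include <cassert>
#include <cstdint>

// illustrative only - mirrors get_padding() and the kv_size % n_pad == 0 invariant
static uint32_t kv_pad(bool flash_attn) {
    return flash_attn ? 256u : 32u;
}

int main() {
    const uint32_t n_pad   = kv_pad(/*flash_attn=*/true); // 256
    const uint32_t kv_size = 4096;                        // accepted: 4096 % 256 == 0
    assert(kv_size % n_pad == 0);
    return 0;
}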
25
+ llama_kv_cache_unified::llama_kv_cache_unified(
26
+ const llama_model & model,
27
+ layer_filter_cb && filter,
28
+ ggml_type type_k,
29
+ ggml_type type_v,
30
+ bool v_trans,
31
+ bool offload,
32
+ uint32_t kv_size,
33
+ uint32_t n_seq_max,
34
+ uint32_t n_pad,
35
+ uint32_t n_swa,
36
+ llama_swa_type swa_type) :
37
+ model(model), hparams(model.hparams), v_trans(v_trans),
38
+ n_seq_max(n_seq_max), n_pad(n_pad), n_swa(n_swa), swa_type(swa_type) {
39
+
40
+ GGML_ASSERT(kv_size % n_pad == 0);
41
+
42
+ // create a context for each buffer type
43
+ std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
44
+ auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
45
+ auto it = ctx_map.find(buft);
46
+ if (it == ctx_map.end()) {
47
+ ggml_init_params params = {
48
+ /*.mem_size =*/ size_t(2u*hparams.n_layer*ggml_tensor_overhead()),
49
+ /*.mem_buffer =*/ NULL,
50
+ /*.no_alloc =*/ true,
51
+ };
52
+
53
+ ggml_context * ctx = ggml_init(params);
54
+ if (!ctx) {
55
+ return nullptr;
56
+ }
57
+
58
+ ctx_map[buft] = ctx;
59
+ ctxs.emplace_back(ctx);
60
+
61
+ return ctx;
62
+ }
63
+
64
+ return it->second;
65
+ };
66
+
67
+ head = 0;
68
+
69
+ cells.resize(kv_size);
70
+
71
+ for (uint32_t il = 0; il < hparams.n_layer; il++) {
72
+ if (filter && !filter(il)) {
73
+ LLAMA_LOG_DEBUG("%s: layer %3d: skipped\n", __func__, il);
74
+ continue;
75
+ }
76
+
77
+ const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
78
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
79
+
80
+ const char * dev_name = "CPU";
81
+
82
+ ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();
83
+
84
+ if (offload) {
85
+ auto * dev = model.dev_layer(il);
86
+ buft = ggml_backend_dev_buffer_type(dev);
87
+
88
+ dev_name = ggml_backend_dev_name(dev);
89
+ }
90
+
91
+ LLAMA_LOG_DEBUG("%s: layer %3d: dev = %s\n", __func__, il, dev_name);
92
+
93
+ ggml_context * ctx = ctx_for_buft(buft);
94
+ if (!ctx) {
95
+ throw std::runtime_error("failed to create ggml context for kv cache");
96
+ }
97
+
98
+ ggml_tensor * k;
99
+ ggml_tensor * v;
100
+
101
+ k = ggml_new_tensor_2d(ctx, type_k, n_embd_k_gqa, kv_size);
102
+ v = ggml_new_tensor_2d(ctx, type_v, n_embd_v_gqa, kv_size);
103
+
104
+ ggml_format_name(k, "cache_k_l%d", il);
105
+ ggml_format_name(v, "cache_v_l%d", il);
106
+
107
+ map_layer_ids[il] = layers.size();
108
+ layers.push_back({ il, k, v });
109
+ }
110
+
111
+ // allocate tensors and initialize the buffers to avoid NaNs in the padding
112
+ for (auto it : ctx_map) {
113
+ auto * buft = it.first;
114
+ auto * ctx = it.second;
115
+
116
+ ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
117
+ if (!buf) {
118
+ throw std::runtime_error("failed to allocate buffer for kv cache");
119
+ }
120
+
121
+ LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
122
+
123
+ ggml_backend_buffer_clear(buf, 0);
124
+ bufs.emplace_back(buf);
125
+ }
126
+
127
+ {
128
+ const size_t memory_size_k = size_k_bytes();
129
+ const size_t memory_size_v = size_v_bytes();
130
+
131
+ LLAMA_LOG_INFO("%s: size = %7.2f MiB (%6u cells, %3d layers, %2u seqs), K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
132
+ (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f), kv_size, (int) layers.size(), n_seq_max,
133
+ ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
134
+ ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
135
+ }
136
+ }
137
+
138
+ void llama_kv_cache_unified::clear() {
139
+ cells.reset();
140
+
141
+ head = 0;
142
+
143
+ for (auto & buf : bufs) {
144
+ ggml_backend_buffer_clear(buf.get(), 0);
145
+ }
146
+ }
147
+
148
+ bool llama_kv_cache_unified::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
149
+ uint32_t new_head = cells.size();
150
+
151
+ if (p0 < 0) {
152
+ p0 = 0;
153
+ }
154
+
155
+ if (p1 < 0) {
156
+ p1 = std::numeric_limits<llama_pos>::max();
157
+ }
158
+
159
+ for (uint32_t i = 0; i < cells.size(); ++i) {
160
+ if (!cells.pos_in(i, p0, p1)) {
161
+ continue;
162
+ }
163
+
164
+ if (cells.seq_has(i, seq_id) && cells.seq_rm(i, seq_id)) {
165
+ if (new_head == cells.size()) {
166
+ new_head = i;
167
+ }
168
+ }
169
+ }
170
+
171
+ // If we freed up a slot, set head to it so searching can start there.
172
+ if (new_head != cells.size() && new_head < head) {
173
+ head = new_head;
174
+ }
175
+
176
+ return true;
177
+ }
178
+
179
+ void llama_kv_cache_unified::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
180
+ if (seq_id_src == seq_id_dst) {
181
+ return;
182
+ }
183
+
184
+ if (p0 < 0) {
185
+ p0 = 0;
186
+ }
187
+
188
+ if (p1 < 0) {
189
+ p1 = std::numeric_limits<llama_pos>::max();
190
+ }
191
+
192
+ for (uint32_t i = 0; i < cells.size(); ++i) {
193
+ if (!cells.pos_in(i, p0, p1)) {
194
+ continue;
195
+ }
196
+
197
+ if (cells.seq_has(i, seq_id_src)) {
198
+ cells.seq_add(i, seq_id_dst);
199
+ }
200
+ }
201
+ }
202
+
203
+ void llama_kv_cache_unified::seq_keep(llama_seq_id seq_id) {
204
+ uint32_t new_head = cells.size();
205
+
206
+ for (uint32_t i = 0; i < cells.size(); ++i) {
207
+ if (cells.seq_keep(i, seq_id)) {
208
+ if (new_head == cells.size()) {
209
+ new_head = i;
210
+ }
211
+ }
212
+ }
213
+
214
+ // If we freed up a slot, set head to it so searching can start there.
215
+ if (new_head != cells.size() && new_head < head) {
216
+ head = new_head;
217
+ }
218
+ }
219
+
220
+ void llama_kv_cache_unified::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
221
+ if (shift == 0) {
222
+ return;
223
+ }
224
+
225
+ uint32_t new_head = cells.size();
226
+
227
+ if (p0 < 0) {
228
+ p0 = 0;
229
+ }
230
+
231
+ if (p1 < 0) {
232
+ p1 = std::numeric_limits<llama_pos>::max();
233
+ }
234
+
235
+ // If there is no range then return early to avoid looping over all cells.
236
+ if (p0 == p1) {
237
+ return;
238
+ }
239
+
240
+ for (uint32_t i = 0; i < cells.size(); ++i) {
241
+ if (!cells.pos_in(i, p0, p1)) {
242
+ continue;
243
+ }
244
+
245
+ if (cells.seq_has(i, seq_id)) {
246
+ if (cells.pos_add(i, shift)) {
247
+ if (new_head == cells.size()) {
248
+ new_head = i;
249
+ }
250
+ }
251
+ }
252
+ }
253
+
254
+ // If we freed up a slot, set head to it so searching can start there.
255
+ // Otherwise we just start the next search from the beginning.
256
+ head = new_head != cells.size() ? new_head : 0;
257
+ }
258
+
259
+ void llama_kv_cache_unified::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
260
+ if (d == 1) {
261
+ return;
262
+ }
263
+
264
+ if (p0 < 0) {
265
+ p0 = 0;
266
+ }
267
+
268
+ if (p1 < 0) {
269
+ p1 = std::numeric_limits<llama_pos>::max();
270
+ }
271
+
272
+ // If there is no range then return early to avoid looping over the cache.
273
+ if (p0 == p1) {
274
+ return;
275
+ }
276
+
277
+ for (uint32_t i = 0; i < cells.size(); ++i) {
278
+ if (!cells.pos_in(i, p0, p1)) {
279
+ continue;
280
+ }
281
+
282
+ if (cells.seq_has(i, seq_id)) {
283
+ cells.pos_div(i, d);
284
+ }
285
+ }
286
+ }
287
+
288
+ llama_pos llama_kv_cache_unified::seq_pos_min(llama_seq_id seq_id) const {
289
+ return cells.seq_pos_min(seq_id);
290
+ }
291
+
292
+ llama_pos llama_kv_cache_unified::seq_pos_max(llama_seq_id seq_id) const {
293
+ return cells.seq_pos_max(seq_id);
294
+ }
295
+
296
+ void llama_kv_cache_unified::restore() {
297
+ for (auto & state : recovery.states) {
298
+ cells.set(state.i, state.cells);
299
+ }
300
+
301
+ recovery.clear();
302
+ }
303
+
304
+ void llama_kv_cache_unified::commit() {
305
+ if (recovery.states.empty()) {
306
+ LLAMA_LOG_WARN("%s: the recovery information upon a commit was empty - might indicate a bug (ref: %s)\n",
307
+ __func__, "https://github.com/ggml-org/llama.cpp/pull/13194");
308
+ return;
309
+ }
310
+
311
+ recovery.clear();
312
+ }
313
+
314
+ bool llama_kv_cache_unified::update(llama_context & lctx) {
315
+ bool need_reserve = false;
316
+
317
+ auto * sched = lctx.get_sched();
318
+
319
+ if (cells.get_has_shift()) {
320
+ if (!get_can_shift()) {
321
+ GGML_ABORT("The current KV cache / model configuration does not support K-shift");
322
+ }
323
+
324
+ LLAMA_LOG_DEBUG("%s: applying K-shift\n", __func__);
325
+
326
+ // apply K-shift if needed
327
+ if (hparams.rope_type != LLAMA_ROPE_TYPE_NONE) {
328
+ ggml_backend_sched_reset(sched);
329
+
330
+ auto * gf = lctx.graph_init();
331
+
332
+ auto res = build_graph_shift(lctx.get_cparams(), lctx.get_ctx_compute(), gf);
333
+
334
+ ggml_backend_sched_alloc_graph(sched, gf);
335
+
336
+ res->set_inputs(nullptr);
337
+
338
+ lctx.graph_compute(gf, false);
339
+
340
+ need_reserve = true;
341
+ }
342
+
343
+ cells.reset_shift();
344
+ }
345
+
346
+ if (do_defrag) {
347
+ LLAMA_LOG_DEBUG("%s: defragmenting KV cache\n", __func__);
348
+
349
+ if (defrag_prepare(lctx.graph_max_nodes())) {
350
+ ggml_backend_sched_reset(sched);
351
+
352
+ auto * gf = lctx.graph_init();
353
+
354
+ auto res = build_graph_defrag(lctx.get_cparams(), lctx.get_ctx_compute(), gf);
355
+
356
+ ggml_backend_sched_alloc_graph(sched, gf);
357
+
358
+ res->set_inputs(nullptr);
359
+
360
+ lctx.graph_compute(gf, false);
361
+
362
+ need_reserve = true;
363
+ }
364
+
365
+ do_defrag = false;
366
+ }
367
+
368
+ return need_reserve;
369
+ }
370
+
371
+ void llama_kv_cache_unified::defrag_sched(float thold) {
372
+ // - do not defrag small contexts (i.e. < 2048 tokens)
373
+ // - count the padding towards the number of used tokens
374
+ const float fragmentation = n >= 2048 ? std::max(0.0f, 1.0f - (float(cells.get_used() + n_pad)/n)) : 0.0f;
375
+
376
+ // queue defragmentation for next llama_kv_cache_update
377
+ if (fragmentation > thold) {
378
+ LLAMA_LOG_DEBUG("%s: fragmentation: %.2f - requesting defrag\n", __func__, fragmentation);
379
+
380
+ do_defrag = true;
381
+ }
382
+ }
383
+
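The fragmentation measure above can be made concrete with hypothetical numbers (illustrative only, not from the sources):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// illustrative only - same arithmetic as defrag_sched(), with made-up cache occupancy
int main() {
    const uint32_t n     = 4096; // cells currently taking part in attention
    const uint32_t used  = 2048; // occupied cells
    const uint32_t n_pad = 32;   // padding counted towards the used tokens

    const float fragmentation = n >= 2048 ? std::max(0.0f, 1.0f - float(used + n_pad)/n) : 0.0f;
    std::printf("fragmentation = %.3f\n", fragmentation); // ~0.492
    return 0;
}

With a threshold of, say, 0.1, this state would queue a defragmentation for the next update().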
384
+ void llama_kv_cache_unified::set_full() {
385
+ n = cells.size();
386
+
387
+ // when simulating a full KV cache, the specific value of the "head" pointer is not important because it does not
388
+ // affect the shapes of the tensors in the compute graph - it only affects the offsets of the K/V views.
389
+ // we should only guarantee that the head position won't cause out-of-bounds view of the K, V tensors, so
390
+ // setting it to 0 is the simplest way to achieve that
391
+ // ref: https://github.com/ggml-org/llama.cpp/issues/13359
392
+ head = 0;
393
+ }
394
+
395
+ llama_sbatch llama_kv_cache_unified::sbatch_init(const llama_batch & batch, bool logits_all) {
396
+ return llama_sbatch(batch, hparams.n_embd, true, logits_all);
397
+ }
398
+
399
+ llama_ubatch llama_kv_cache_unified::ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const {
400
+ GGML_UNUSED(embd_pooled);
401
+ return sbatch.split_simple(n_ubatch);
402
+ }
403
+
404
+ bool llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) {
405
+ const uint32_t n_tokens = ubatch.n_tokens;
406
+
407
+ // if we have enough unused cells before the current head ->
408
+ // better to start searching from the beginning of the cache, hoping to fill it
409
+ if (head > cells.get_used() + 2*ubatch.n_tokens) {
410
+ head = 0;
411
+ }
412
+
413
+ // otherwise, one cell per token.
414
+
415
+ if (n_tokens > cells.size()) {
416
+ LLAMA_LOG_ERROR("%s: n_tokens = %d > size = %u\n", __func__, n_tokens, cells.size());
417
+ return false;
418
+ }
419
+
420
+ //#define FIND_SLOT_DEBUG 1
421
+ #if FIND_SLOT_DEBUG
422
+ LLAMA_LOG_WARN("begin: n = %5d, used = %5d, head = %5d, n_swa = %5d\n", n, used, head, n_swa);
423
+
424
+ // for debugging
425
+ {
426
+ std::string ss;
427
+ if (n_swa > 0) {
428
+ for (uint32_t i = 0; i < size; ++i) {
429
+ if (cells.is_empty(i)) {
430
+ ss += '.';
431
+ } else {
432
+ ss += 'x';
433
+ }
434
+ if (i%256 == 255) {
435
+ ss += '\n';
436
+ }
437
+ }
438
+ }
439
+ LLAMA_LOG_WARN("\n%s\n", ss.c_str());
440
+ }
441
+ #endif
442
+
443
+ uint32_t n_tested = 0;
444
+
445
+ while (true) {
446
+ if (head + n_tokens > cells.size()) {
447
+ n_tested += cells.size() - head;
448
+ head = 0;
449
+ continue;
450
+ }
451
+
452
+ bool found = true;
453
+ for (uint32_t i = 0; i < n_tokens; i++) {
454
+ // TODO: improve to accept cells that are masked by the SWA
455
+ if (!cells.is_empty(head + i)) {
456
+ found = false;
457
+ head += i + 1;
458
+ n_tested += i + 1;
459
+ break;
460
+ }
461
+ }
462
+
463
+ if (found) {
464
+ break;
465
+ }
466
+
467
+ if (n_tested >= cells.size()) {
468
+ //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
469
+ return false;
470
+ }
471
+ }
472
+
473
+ // store the old state of the cells in the recovery stack
474
+ recovery.states.push_back({head, cells.cp(head, n_tokens)});
475
+
476
+ for (uint32_t i = 0; i < n_tokens; ++i) {
477
+ cells.pos_set(head + i, ubatch.pos[i]);
478
+
479
+ for (int32_t j = 0; j < ubatch.n_seq_id[i]; j++) {
480
+ cells.seq_add(head + i, ubatch.seq_id[i][j]);
481
+ }
482
+ }
483
+
484
+ // a heuristic, to avoid attending the full cache if it is not yet utilized
485
+ // after enough generations, the benefit from this heuristic disappears
486
+ // if we start defragmenting the cache, the benefit from this will be more important
487
+ n = std::min(cells.size(), std::max(n_pad, GGML_PAD(cells.used_max_p1(), n_pad)));
488
+
489
+ #ifdef FIND_SLOT_DEBUG
490
+ LLAMA_LOG_WARN("end: n = %5d, used = %5d, head = %5d, n_swa = %5d\n", n, used, head, n_swa);
491
+ #endif
492
+
493
+ return true;
494
+ }
495
+
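A standalone sketch (illustrative only; pad_up is a made-up stand-in for GGML_PAD, i.e. rounding up to a multiple) of the attention-window heuristic at the end of find_slot():

#include <algorithm>
#include <cstdint>
#include <cstdio>

// illustrative only - attend up to the highest used cell (+1), rounded up to the padding,
// clamped to the cache size
static uint32_t pad_up(uint32_t x, uint32_t p) { return ((x + p - 1)/p)*p; }

int main() {
    const uint32_t cache_size  = 4096;
    const uint32_t used_max_p1 = 70;   // highest occupied cell index + 1
    const uint32_t n_pad       = 32;

    const uint32_t n = std::min(cache_size, std::max(n_pad, pad_up(used_max_p1, n_pad)));
    std::printf("n = %u\n", n); // 96: only the first 96 cells take part in attention
    return 0;
}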
496
+ bool llama_kv_cache_unified::get_can_shift() const {
497
+ return true;
498
+ }
499
+
500
+ uint32_t llama_kv_cache_unified::get_n() const {
501
+ return n;
502
+ }
503
+
504
+ uint32_t llama_kv_cache_unified::get_size() const {
505
+ return cells.size();
506
+ }
507
+
508
+ ggml_tensor * llama_kv_cache_unified::get_k(ggml_context * ctx, int32_t il) const {
509
+ const int32_t ikv = map_layer_ids.at(il);
510
+
511
+ auto * k = layers[ikv].k;
512
+
513
+ return ggml_view_3d(ctx, k,
514
+ hparams.n_embd_head_k, hparams.n_head_kv(il), n,
515
+ ggml_row_size(k->type, hparams.n_embd_head_k),
516
+ ggml_row_size(k->type, hparams.n_embd_k_gqa(il)),
517
+ 0);
518
+ }
519
+
520
+ ggml_tensor * llama_kv_cache_unified::get_v(ggml_context * ctx, int32_t il) const {
521
+ const int32_t ikv = map_layer_ids.at(il);
522
+
523
+ auto * v = layers[ikv].v;
524
+
525
+ if (!v_trans) {
526
+ // note: v->nb[1] <= v->nb[2]
527
+ return ggml_view_3d(ctx, v,
528
+ hparams.n_embd_head_v, hparams.n_head_kv(il), n,
529
+ ggml_row_size(v->type, hparams.n_embd_head_v), // v->nb[1]
530
+ ggml_row_size(v->type, hparams.n_embd_v_gqa(il)), // v->nb[2]
531
+ 0);
532
+ }
533
+
534
+ // note: v->nb[1] > v->nb[2]
535
+ return ggml_view_3d(ctx, v,
536
+ n, hparams.n_head_kv(il), hparams.n_embd_head_v,
537
+ ggml_row_size(v->type, v->ne[1]*hparams.n_embd_head_v), // v->nb[1]
538
+ ggml_row_size(v->type, v->ne[1]), // v->nb[2]
539
+ 0);
540
+ }
541
+
542
+ ggml_tensor * llama_kv_cache_unified::cpy_k(ggml_context * ctx, ggml_tensor * k_cur, int32_t il) const {
543
+ const int32_t ikv = map_layer_ids.at(il);
544
+
545
+ auto * k = layers[ikv].k;
546
+
547
+ const int64_t n_tokens = k_cur->ne[2];
548
+
549
+ ggml_tensor * k_view = ggml_view_1d(ctx, k,
550
+ n_tokens*hparams.n_embd_k_gqa(il),
551
+ ggml_row_size(k->type, hparams.n_embd_k_gqa(il))*head);
552
+
553
+ return ggml_cpy(ctx, k_cur, k_view);
554
+ }
555
+
556
+ ggml_tensor * llama_kv_cache_unified::cpy_v(ggml_context * ctx, ggml_tensor * v_cur, int32_t il) const {
557
+ const int32_t ikv = map_layer_ids.at(il);
558
+
559
+ auto * v = layers[ikv].v;
560
+
561
+ const int64_t n_tokens = v_cur->ne[2];
562
+
563
+ v_cur = ggml_reshape_2d(ctx, v_cur, hparams.n_embd_v_gqa(il), n_tokens);
564
+
565
+ ggml_tensor * v_view = nullptr;
566
+
567
+ if (!v_trans) {
568
+ v_view = ggml_view_1d(ctx, v,
569
+ n_tokens*hparams.n_embd_v_gqa(il),
570
+ ggml_row_size(v->type, hparams.n_embd_v_gqa(il))*head);
571
+ } else {
572
+ // note: the V cache is transposed when not using flash attention
573
+ v_view = ggml_view_2d(ctx, v, n_tokens, hparams.n_embd_v_gqa(il),
574
+ (v->ne[1])*ggml_element_size(v),
575
+ ( head)*ggml_element_size(v));
576
+
577
+ v_cur = ggml_transpose(ctx, v_cur);
578
+ }
579
+
580
+ return ggml_cpy(ctx, v_cur, v_view);
581
+ }
582
+
583
+ void llama_kv_cache_unified::prune_swa(llama_seq_id seq_id, llama_pos pmin, llama_pos pmax) {
584
+ // no pruning is needed when the cache does not use SWA
585
+ GGML_ASSERT(swa_type != LLAMA_SWA_TYPE_NONE && "do not prune non-SWA cache");
586
+
587
+ int n_attended = 0;
588
+
589
+ for (uint32_t i = 0; i < cells.size(); ++i) {
590
+ if (!cells.seq_has(i, seq_id)) {
591
+ continue;
592
+ }
593
+
594
+ const llama_pos p0 = cells.pos_get(i);
595
+
596
+ if (p0 <= pmin && !is_masked_swa(p0, pmin)) {
597
+ n_attended++;
598
+ }
599
+
600
+ if (is_masked_swa(p0, pmax)) {
601
+ cells.seq_rm(i, seq_id);
602
+ }
603
+ }
604
+
605
+ if (n_attended < std::min<int>(n_swa, pmin)) {
606
+ LLAMA_LOG_WARN("%s: partial SWA cache detected - possible loss of information, pmin = %d, n_attended = %d, n_swa = %d\n", __func__, pmin, n_attended, n_swa);
607
+ }
608
+ }
609
+
610
+ void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const {
611
+ const int64_t n_tokens = ubatch->n_tokens;
612
+ const int64_t n_seq_tokens = ubatch->n_seq_tokens;
613
+ const int64_t n_seqs = ubatch->n_seqs;
614
+
615
+ GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
616
+ float * data = (float *) dst->data;
617
+
618
+ const int64_t n_kv = n;
619
+
620
+ // Use only the previous KV cells of the correct sequence for each token of the ubatch.
621
+ // It's assumed that if a token in the batch has multiple sequences, they are equivalent.
622
+ // Example with a cache of 10 tokens, 2 tokens populated in cache and 3 tokens in batch:
623
+ // Causal mask:
624
+ // xxx-------
625
+ // xxxx------
626
+ // xxxxx-----
627
+ // Non-causal mask:
628
+ // xxxxx-----
629
+ // xxxxx-----
630
+ // xxxxx-----
631
+ // To visualize the mask, see https://github.com/ggml-org/llama.cpp/pull/12615
632
+ for (int h = 0; h < 1; ++h) {
633
+ for (int s = 0; s < n_seqs; ++s) {
634
+ const llama_seq_id seq_id = ubatch->seq_id[s][0];
635
+
636
+ for (int j = 0; j < n_seq_tokens; ++j) {
637
+ const llama_pos p1 = ubatch->pos[s*n_seq_tokens + j];
638
+
639
+ for (int i = 0; i < n_kv; ++i) {
640
+ float f = 0.0f;
641
+
642
+ bool masked = false;
643
+
644
+ if (cells.is_empty(i)) {
645
+ masked = true;
646
+ } else {
647
+ const llama_pos p0 = cells.pos_get(i);
648
+
649
+ // mask the token if not the same sequence
650
+ masked = masked || (!cells.seq_has(i, seq_id));
651
+
652
+ // mask future tokens
653
+ masked = masked || (causal_attn && p0 > p1);
654
+
655
+ // apply SWA if any
656
+ masked = masked || (is_masked_swa(p0, p1));
657
+
658
+ if (!masked && hparams.use_alibi) {
659
+ f = -std::abs(p0 - p1);
660
+ }
661
+ }
662
+
663
+ if (masked) {
664
+ f = -INFINITY;
665
+ }
666
+
667
+ data[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f;
668
+ }
669
+ }
670
+ }
671
+
672
+ // mask padded tokens
673
+ if (data) {
674
+ for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
675
+ for (int j = 0; j < n_kv; ++j) {
676
+ data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
677
+ }
678
+ }
679
+ }
680
+ }
681
+ }
682
+
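The masking example in the comment above can be reproduced with a small standalone program (illustrative only; 10 cells, positions 0-4 occupied once the 3 batch tokens have been placed):

#include <cstdio>

// illustrative only - prints the causal-mask pattern from the comment in set_input_kq_mask()
int main() {
    const int n_kv = 10;
    const int cache_pos[10] = {0, 1, 2, 3, 4, -1, -1, -1, -1, -1}; // -1 = empty cell
    for (int p1 = 2; p1 <= 4; ++p1) {           // one row per batch token
        for (int i = 0; i < n_kv; ++i) {
            const bool masked = cache_pos[i] < 0 || cache_pos[i] > p1; // empty or future
            std::putchar(masked ? '-' : 'x');
        }
        std::putchar('\n');                      // xxx-------, xxxx------, xxxxx-----
    }
    return 0;
}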
683
+ void llama_kv_cache_unified::set_input_k_shift(ggml_tensor * dst) const {
684
+ GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
685
+
686
+ int32_t * data = (int32_t *) dst->data;
687
+
688
+ for (uint32_t i = 0; i < cells.size(); ++i) {
689
+ data[i] = cells.is_empty(i) ? 0 : cells.get_shift(i);
690
+ }
691
+ }
692
+
693
+ void llama_kv_cache_unified::set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const {
694
+ const int64_t n_tokens = ubatch->n_tokens;
695
+
696
+ GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
697
+ GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing
698
+
699
+ int32_t * data = (int32_t *) dst->data;
700
+
701
+ const int64_t n_kv = n;
702
+
703
+ for (int h = 0; h < 1; ++h) {
704
+ for (int j = 0; j < n_tokens; ++j) {
705
+ for (int i = 0; i < n_kv; ++i) {
706
+ // the position when the cell is empty is irrelevant - it will be masked out later in the attention
707
+ const llama_pos p0 = cells.is_empty(i) ? -1 : cells.pos_get(i);
708
+
709
+ data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(p0, ubatch->pos[j], hparams.n_rel_attn_bkts, false);
710
+ }
711
+ }
712
+ }
713
+ }
714
+
715
+ size_t llama_kv_cache_unified::total_size() const {
716
+ size_t size = 0;
717
+
718
+ for (const auto & buf : bufs) {
719
+ size += ggml_backend_buffer_get_size(buf.get());
720
+ }
721
+
722
+ return size;
723
+ }
724
+
725
+ size_t llama_kv_cache_unified::size_k_bytes() const {
726
+ size_t size_k_bytes = 0;
727
+
728
+ for (const auto & layer : layers) {
729
+ size_k_bytes += ggml_nbytes(layer.k);
730
+ }
731
+
732
+ return size_k_bytes;
733
+ }
734
+
735
+ size_t llama_kv_cache_unified::size_v_bytes() const {
736
+ size_t size_v_bytes = 0;
737
+
738
+ for (const auto & layer : layers) {
739
+ size_v_bytes += ggml_nbytes(layer.v);
740
+ }
741
+
742
+ return size_v_bytes;
743
+ }
744
+
745
+ ggml_tensor * llama_kv_cache_unified::build_rope_shift(
746
+ const llama_cparams & cparams,
747
+ ggml_context * ctx,
748
+ ggml_tensor * cur,
749
+ ggml_tensor * shift,
750
+ ggml_tensor * factors,
751
+ float freq_base,
752
+ float freq_scale) const {
753
+ const auto & n_ctx_orig = cparams.n_ctx_orig_yarn;
754
+
755
+ const auto & yarn_ext_factor = cparams.yarn_ext_factor;
756
+ const auto & yarn_beta_fast = cparams.yarn_beta_fast;
757
+ const auto & yarn_beta_slow = cparams.yarn_beta_slow;
758
+
759
+ const auto & n_rot = hparams.n_rot;
760
+ const auto & rope_type = hparams.rope_type;
761
+
762
+ // See llm_build_deepseek2() for why attn_factor has to be scaled for YaRN RoPE to work correctly.
763
+ // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation.
764
+ const float yarn_attn_factor = model.arch == LLM_ARCH_DEEPSEEK2 ? 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)) : cparams.yarn_attn_factor;
765
+
766
+ ggml_tensor * tmp;
767
+
768
+ if (ggml_is_quantized(cur->type)) {
769
+ // dequantize to f32 -> RoPE -> quantize back
770
+ tmp = ggml_cast(ctx, cur, GGML_TYPE_F32);
771
+
772
+ tmp = ggml_rope_ext(ctx, tmp,
773
+ shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
774
+ yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);
775
+
776
+ tmp = ggml_cpy(ctx, tmp, cur);
777
+ } else {
778
+ // we rotate only the first n_rot dimensions
779
+ tmp = ggml_rope_ext_inplace(ctx, cur,
780
+ shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
781
+ yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);
782
+ }
783
+
784
+ return tmp;
785
+ }
786
+
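For reference, the DeepSeek-2 attention-factor rescaling above works out as follows on a hypothetical frequency scale (illustrative only):

#include <cmath>
#include <cstdio>

// illustrative only - yarn_attn_factor = 1 / (1 + 0.1 * ln(1 / freq_scale))
int main() {
    const float freq_scale = 0.25f; // made-up YaRN frequency scale
    const float yarn_attn_factor = 1.0f / (1.0f + 0.1f * std::log(1.0f / freq_scale));
    std::printf("yarn_attn_factor = %.3f\n", yarn_attn_factor); // ~0.878
    return 0;
}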
787
+ class llm_graph_input_k_shift : public llm_graph_input_i {
788
+ public:
789
+ llm_graph_input_k_shift(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {}
790
+ virtual ~llm_graph_input_k_shift() = default;
791
+
792
+ void set_input(const llama_ubatch * ubatch) override;
793
+
794
+ ggml_tensor * k_shift; // I32 [kv_size]
795
+
796
+ const llama_kv_cache_unified * kv_self;
797
+ };
798
+
799
+ void llm_graph_input_k_shift::set_input(const llama_ubatch * ubatch) {
800
+ GGML_UNUSED(ubatch);
801
+
802
+ if (k_shift) {
803
+ kv_self->set_input_k_shift(k_shift);
804
+ }
805
+ }
806
+
807
+ llm_graph_result_ptr llama_kv_cache_unified::build_graph_shift(
808
+ const llama_cparams & cparams,
809
+ ggml_context * ctx,
810
+ ggml_cgraph * gf) const {
811
+ auto res = std::make_unique<llm_graph_result>();
812
+
813
+ const auto & n_embd_head_k = hparams.n_embd_head_k;
814
+ //const auto & n_embd_head_v = hparams.n_embd_head_v;
815
+
816
+ //GGML_ASSERT(kv_self->size == n_ctx);
817
+
818
+ auto inp = std::make_unique<llm_graph_input_k_shift>(this);
819
+
820
+ inp->k_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, cparams.n_ctx);
821
+ ggml_set_input(inp->k_shift);
822
+
823
+ for (const auto & layer : layers) {
824
+ const uint32_t il = layer.il;
825
+
826
+ const int64_t n_head_kv = hparams.n_head_kv(il);
827
+ const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
828
+
829
+ const float freq_base_l = model.get_rope_freq_base (cparams, il);
830
+ const float freq_scale_l = model.get_rope_freq_scale(cparams, il);
831
+
832
+ ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
833
+
834
+ ggml_tensor * k =
835
+ ggml_view_3d(ctx, layer.k,
836
+ n_embd_head_k, n_head_kv, cells.size(),
837
+ ggml_row_size(layer.k->type, n_embd_head_k),
838
+ ggml_row_size(layer.k->type, n_embd_k_gqa),
839
+ 0);
840
+
841
+ ggml_tensor * cur = build_rope_shift(cparams, ctx, k, inp->k_shift, rope_factors, freq_base_l, freq_scale_l);
842
+
843
+ ggml_build_forward_expand(gf, cur);
844
+ }
845
+
846
+ res->add_input(std::move(inp));
847
+
848
+ return res;
849
+ }
850
+
851
+ llm_graph_result_ptr llama_kv_cache_unified::build_graph_defrag(
852
+ const llama_cparams & cparams,
853
+ ggml_context * ctx,
854
+ ggml_cgraph * gf) const {
855
+ auto res = std::make_unique<llm_graph_result>();
856
+
857
+ const auto & ids = defrag_info.ids;
858
+
859
+ #if 0
860
+ // CPU defrag
861
+ //
862
+ // TODO: optimizations are possible:
863
+ // - multiple threads
864
+ // - avoid copying to the host memory when already there
865
+ //
866
+ // likely not worth the effort, as we have ggml_graph based defrag
867
+ //
868
+
869
+ const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa();
870
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa();
871
+
872
+ const uint32_t kv_size = size;
873
+
874
+ std::vector<uint8_t> buf_k;
875
+ std::vector<uint8_t> buf_v;
876
+
877
+ for (uint32_t il = 0; il < n_layer; ++il) {
878
+ const size_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa);
879
+ const size_t k_size = ggml_row_size(k_l[il]->type, n_embd_k_gqa*kv_size);
880
+
881
+ const size_t v_size_el = ggml_type_size(v_l[il]->type);
882
+ const size_t v_size = ggml_row_size (v_l[il]->type, n_embd_v_gqa*kv_size);
883
+
884
+ buf_k.resize(k_size);
885
+ buf_v.resize(v_size);
886
+
887
+ ggml_backend_tensor_get(k_l[il], buf_k.data(), 0, buf_k.size());
888
+ ggml_backend_tensor_get(v_l[il], buf_v.data(), 0, buf_v.size());
889
+
890
+ // batch move [i, i+nm) to [id, id+nm)
891
+ // note: cells can move only to a lower index
892
+ for (uint32_t i = 0; i < n_kv; ++i) {
893
+ const uint32_t id = ids[i];
894
+
895
+ if (i == id || id == n_kv) {
896
+ continue;
897
+ }
898
+
899
+ uint32_t nm = 1;
900
+
901
+ while (i + nm < n_kv && ids[i + nm] == id + nm) {
902
+ nm++;
903
+ }
904
+
905
+ // move keys
906
+ {
907
+ const int64_t os = i*k_size_row;
908
+ const int64_t od = id*k_size_row;
909
+
910
+ memcpy(buf_k.data() + od, buf_k.data() + os, nm*k_size_row);
911
+ }
912
+
913
+ // move values (note: they are transposed)
914
+ {
915
+ const int64_t os = i;
916
+ const int64_t od = id;
917
+
918
+ for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
919
+ memcpy(buf_v.data() + (od + j*kv_size)*v_size_el, buf_v.data() + (os + j*kv_size)*v_size_el, nm*v_size_el);
920
+ }
921
+ }
922
+
923
+ i += nm - 1;
924
+ }
925
+
926
+ ggml_backend_tensor_set(k_l[il], buf_k.data(), 0, buf_k.size());
927
+ ggml_backend_tensor_set(v_l[il], buf_v.data(), 0, buf_v.size());
928
+ }
929
+ #else
930
+ for (uint32_t i = 0; i < ids.size(); ++i) {
931
+ const uint32_t id = ids[i];
932
+
933
+ if (i == id || id == ids.size()) {
934
+ continue;
935
+ }
936
+
937
+ uint32_t nm = 1;
938
+
939
+ while (i + nm < ids.size() && ids[i + nm] == id + nm) {
940
+ nm++;
941
+ }
942
+
943
+ for (const auto & layer : layers) {
944
+ const uint32_t il = layer.il;
945
+
946
+ const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
947
+ const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
948
+
949
+ ggml_tensor * view_k_src = ggml_view_2d(ctx, layer.k,
950
+ n_embd_k_gqa, nm,
951
+ ggml_row_size(layer.k->type, n_embd_k_gqa),
952
+ ggml_row_size(layer.k->type, n_embd_k_gqa*i));
953
+
954
+ ggml_tensor * view_k_dst = ggml_view_2d(ctx, layer.k,
955
+ n_embd_k_gqa, nm,
956
+ ggml_row_size(layer.k->type, n_embd_k_gqa),
957
+ ggml_row_size(layer.k->type, n_embd_k_gqa*id));
958
+
959
+ ggml_tensor * view_v_src;
960
+ ggml_tensor * view_v_dst;
961
+
962
+ if (cparams.flash_attn) {
963
+ // NOTE: the V cache is not transposed when using flash attention
964
+ view_v_src = ggml_view_2d(ctx, layer.v,
965
+ n_embd_v_gqa, nm,
966
+ ggml_row_size(layer.v->type, n_embd_v_gqa),
967
+ ggml_row_size(layer.v->type, n_embd_v_gqa*i));
968
+
969
+ view_v_dst = ggml_view_2d(ctx, layer.v,
970
+ n_embd_v_gqa, nm,
971
+ ggml_row_size(layer.v->type, n_embd_v_gqa),
972
+ ggml_row_size(layer.v->type, n_embd_v_gqa*id));
973
+ } else {
974
+ view_v_src = ggml_view_2d(ctx, layer.v,
975
+ nm, n_embd_v_gqa,
976
+ ggml_row_size(layer.v->type, cells.size()),
977
+ ggml_row_size(layer.v->type, i));
978
+
979
+ view_v_dst = ggml_view_2d(ctx, layer.v,
980
+ nm, n_embd_v_gqa,
981
+ ggml_row_size(layer.v->type, cells.size()),
982
+ ggml_row_size(layer.v->type, id));
983
+ }
984
+
985
+ ggml_build_forward_expand(gf, ggml_cpy(ctx, view_k_src, view_k_dst));
986
+ ggml_build_forward_expand(gf, ggml_cpy(ctx, view_v_src, view_v_dst));
987
+ }
988
+
989
+ i += nm - 1;
990
+ }
991
+
992
+ //LLAMA_LOG_INFO("gf->n_nodes = %d\n", gf->n_nodes);
993
+ #endif
994
+
995
+ return res;
996
+ }
997
+
998
+ bool llama_kv_cache_unified::defrag_prepare(int32_t n_max_nodes) {
999
+ const uint32_t n_layer = layers.size();
1000
+
1001
+ const uint32_t n_kv = cells.used_max_p1();
1002
+ const uint32_t n_used = cells.get_used();
1003
+
1004
+ assert(n_used <= n_kv);
1005
+
1006
+ //const int64_t t_start = ggml_time_us();
1007
+
1008
+ // number of cells moved
1009
+ uint32_t n_moves = 0;
1010
+
1011
+ // each move requires 6*n_layer tensors (see graph_build_kv_self_defrag)
1012
+ // - source view, destination view, copy operation
1013
+ // - x2 for keys and values
1014
+ //const uint32_t max_moves = max_nodes()/(6*n_layer);
1015
+ // TODO: tmp fix https://github.com/ggerganov/llama.cpp/issues/6685#issuecomment-2057579516
1016
+ const uint32_t max_moves = (n_max_nodes - 2*n_layer)/(6*n_layer);
1017
+
1018
+ // determine which KV cells to move where
1019
+ //
1020
+ // cell i moves to ids[i]
1021
+ //
1022
+ // if ids[i] == i || ids[i] == n_kv, then cell i is not moved
1023
+ //
1024
+ auto & ids = defrag_info.ids;
1025
+
1026
+ ids.clear();
1027
+ ids.resize(n_kv, n_kv);
1028
+
1029
+ for (uint32_t i0 = 0; i0 < n_used; ++i0) {
1030
+ if (!cells.is_empty(i0)) {
1031
+ ids[i0] = i0;
1032
+
1033
+ continue;
1034
+ }
1035
+
1036
+ // found a hole - fill it with data from the end of the cache
1037
+
1038
+ uint32_t nh = 1;
1039
+
1040
+ // determine the size of the hole
1041
+ while (i0 + nh < n_used && cells.is_empty(i0 + nh)) {
1042
+ nh++;
1043
+ }
1044
+
1045
+ uint32_t nf = 0;
1046
+ uint32_t is = n_kv - 1;
1047
+
1048
+ // starting from the end, find nh non-empty cells
1049
+ for (; is > i0; --is) {
1050
+ if (cells.is_empty(is) || ids[is] != n_kv) {
1051
+ continue;
1052
+ }
1053
+
1054
+ // non-empty cell which is not yet moved
1055
+ nf++;
1056
+
1057
+ if (nf == nh) {
1058
+ break;
1059
+ }
1060
+ }
1061
+
1062
+ // this can only happen if `n_used` is not accurate, which would be a bug
1063
+ GGML_ASSERT(nf == nh && "KV defrag bug: nf != nh");
1064
+
1065
+ nf = 0;
1066
+
1067
+ uint32_t i1 = is;
1068
+
1069
+ // are we moving a contiguous block of memory?
1070
+ bool cont = false;
1071
+
1072
+ // should we stop searching for the next move?
1073
+ bool stop = false;
1074
+
1075
+ // go back and move the nf cells to the hole
1076
+ for (; i1 < n_kv; ++i1) {
1077
+ if (cells.is_empty(i1) || ids[i1] != n_kv) {
1078
+ if (n_moves == max_moves) {
1079
+ stop = true;
1080
+ break;
1081
+ }
1082
+
1083
+ cont = false;
1084
+ continue;
1085
+ }
1086
+
1087
+ // this cell goes to (i0 + nf)
1088
+ ids[i1] = i0 + nf;
1089
+
1090
+ // move the cell meta data
1091
+ cells.mv(i1, i0 + nf);
1092
+
1093
+ head = n_used;
1094
+
1095
+ if (!cont) {
1096
+ n_moves++;
1097
+ cont = true;
1098
+ }
1099
+
1100
+ nf++;
1101
+
1102
+ if (nf == nh) {
1103
+ break;
1104
+ }
1105
+ }
1106
+
1107
+ if (stop || n_moves == max_moves) {
1108
+ break;
1109
+ }
1110
+
1111
+ //LLAMA_LOG_INFO("(tmp log) KV defrag: move [%u, %u) to [%u, %u)\n", is, i1 + 1, i0, i0 + nh);
1112
+
1113
+ i0 += nh - 1;
1114
+ }
1115
+
1116
+ if (n_moves == 0) {
1117
+ return false;
1118
+ }
1119
+
1120
+ LLAMA_LOG_DEBUG("%s: (tmp log) KV defrag cell moves: %u\n", __func__, n_moves);
1121
+
1122
+ LLAMA_LOG_DEBUG("%s: expected gf nodes: %u\n", __func__, 6*n_moves*n_layer);
1123
+
1124
+ return true;
1125
+ }
1126
+
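To illustrate the move-plan semantics above (cell i moves to ids[i]; ids[i] == i or ids[i] == n_kv means no move), here is a hand-derived plan for a 6-cell cache with holes at cells 1 and 3, applied to stand-in per-cell data (illustrative only, not from the sources):

#include <cstdint>
#include <vector>

// illustrative only - applies a defrag move plan to per-cell stand-in data
int main() {
    const uint32_t n_kv = 6;
    std::vector<uint32_t> ids  = {0, n_kv, 2, n_kv, 3, 1};  // cell 4 -> 3, cell 5 -> 1
    std::vector<int>      data = {10, 0, 12, 0, 14, 15};    // stand-in for per-cell K/V rows
    for (uint32_t i = 0; i < n_kv; ++i) {
        if (ids[i] != i && ids[i] != n_kv) {
            data[ids[i]] = data[i];                          // cells only ever move to a lower index
        }
    }
    // data is now {10, 15, 12, 14, 14, 15}: the first n_used = 4 cells are contiguous
    return 0;
}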
1127
+ bool llama_kv_cache_unified::is_masked_swa(llama_pos p0, llama_pos p1) const {
1128
+ assert(p0 >= 0 && p1 >= 0);
1129
+
1130
+ switch (swa_type) {
1131
+ case LLAMA_SWA_TYPE_NONE:
1132
+ {
1133
+ } break;
1134
+ case LLAMA_SWA_TYPE_STANDARD:
1135
+ {
1136
+ if (p1 - p0 >= (int32_t) n_swa) {
1137
+ return true;
1138
+ }
1139
+ } break;
1140
+ case LLAMA_SWA_TYPE_CHUNKED:
1141
+ {
1142
+ const llama_pos pos_chunk_start = (p1 / n_swa) * n_swa;
1143
+
1144
+ if (p0 < pos_chunk_start) {
1145
+ return true;
1146
+ }
1147
+ } break;
1148
+ }
1149
+
1150
+ return false;
1151
+ }
1152
+
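A restatement of the two SWA variants above as standalone predicates (illustrative only; the helper names are made up):

#include <cstdint>
#include <cstdio>

// illustrative only - standard masks keys n_swa or more positions behind the query,
// chunked masks keys before the start of the query's n_swa-sized chunk
static bool masked_standard(int32_t p0, int32_t p1, int32_t n_swa) { return p1 - p0 >= n_swa; }
static bool masked_chunked (int32_t p0, int32_t p1, int32_t n_swa) { return p0 < (p1/n_swa)*n_swa; }

int main() {
    const int32_t n_swa = 4, p1 = 9;
    for (int32_t p0 = 0; p0 <= p1; ++p0) {
        std::printf("p0=%d standard=%d chunked=%d\n", p0,
                    masked_standard(p0, p1, n_swa), masked_chunked(p0, p1, n_swa));
    }
    // standard masks p0 <= 5, chunked masks p0 < 8
    return 0;
}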
1153
+ void llama_kv_cache_unified::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
1154
+ std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
1155
+ uint32_t cell_count = 0;
1156
+
1157
+ // Count the number of cells with the specified seq_id
1158
+ // Find all the ranges of cells with this seq id (or all, when -1)
1159
+ uint32_t cell_range_begin = cells.size();
1160
+
1161
+ for (uint32_t i = 0; i < cells.size(); ++i) {
1162
+ if (!cells.is_empty(i) && (seq_id == -1 || cells.seq_has(i, seq_id))) {
1163
+ ++cell_count;
1164
+ if (cell_range_begin == cells.size()) {
1165
+ cell_range_begin = i;
1166
+ }
1167
+ } else {
1168
+ if (cell_range_begin != cells.size()) {
1169
+ cell_ranges.emplace_back(cell_range_begin, i);
1170
+ cell_range_begin = cells.size();
1171
+ }
1172
+ }
1173
+ }
1174
+
1175
+ if (cell_range_begin != cells.size()) {
1176
+ cell_ranges.emplace_back(cell_range_begin, cells.size());
1177
+ }
1178
+
1179
+ // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
1180
+ uint32_t cell_count_check = 0;
1181
+ for (const auto & range : cell_ranges) {
1182
+ cell_count_check += range.second - range.first;
1183
+ }
1184
+ GGML_ASSERT(cell_count == cell_count_check);
1185
+
1186
+ io.write(&cell_count, sizeof(cell_count));
1187
+
1188
+ state_write_meta(io, cell_ranges, seq_id);
1189
+ state_write_data(io, cell_ranges);
1190
+ }
1191
+
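The range collection in state_write() amounts to turning a per-cell match bitmap into [first, last) runs so the cell data can be serialized in a few large blocks; a minimal standalone version (illustrative only, collect_ranges is a made-up name):

#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

// illustrative only - contiguous runs of matching cells become [first, last) pairs
static std::vector<std::pair<uint32_t, uint32_t>> collect_ranges(const std::vector<bool> & match) {
    std::vector<std::pair<uint32_t, uint32_t>> ranges;
    const uint32_t n = (uint32_t) match.size();
    uint32_t begin = n;
    for (uint32_t i = 0; i < n; ++i) {
        if (match[i]) {
            if (begin == n) begin = i;
        } else if (begin != n) {
            ranges.emplace_back(begin, i);
            begin = n;
        }
    }
    if (begin != n) ranges.emplace_back(begin, n);
    return ranges;
}

int main() {
    const std::vector<bool> match = {true, true, false, true, true, true, false, false};
    const auto ranges = collect_ranges(match);
    std::printf("%zu ranges\n", ranges.size()); // {(0,2), (3,6)}
    return 0;
}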
1192
+ void llama_kv_cache_unified::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
1193
+ uint32_t cell_count;
1194
+ io.read_to(&cell_count, sizeof(cell_count));
1195
+
1196
+ bool res = true;
1197
+ res = res && state_read_meta(io, cell_count, seq_id);
1198
+ res = res && state_read_data(io, cell_count);
1199
+
1200
+ if (!res) {
1201
+ if (seq_id == -1) {
1202
+ clear();
1203
+ } else {
1204
+ seq_rm(seq_id, -1, -1);
1205
+ }
1206
+ throw std::runtime_error("failed to restore kv cache");
1207
+ }
1208
+ }
1209
+
1210
+ void llama_kv_cache_unified::state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id) const {
1211
+ for (const auto & range : cell_ranges) {
1212
+ for (uint32_t i = range.first; i < range.second; ++i) {
1213
+ std::vector<llama_seq_id> seq_ids;
1214
+
1215
+ for (llama_seq_id cur = 0; cur < (int) n_seq_max; ++cur) {
1216
+ if (cur == seq_id || seq_id == -1) {
1217
+ if (cells.seq_has(i, cur)) {
1218
+ seq_ids.push_back(cur);
1219
+ }
1220
+ }
1221
+ }
1222
+
1223
+ const llama_pos pos = cells.pos_get(i);
1224
+ const uint32_t n_seq_id = seq_ids.size();
1225
+
1226
+ io.write(&pos, sizeof(pos));
1227
+ io.write(&n_seq_id, sizeof(n_seq_id));
1228
+
1229
+ for (const auto & seq_id : seq_ids) {
1230
+ io.write(&seq_id, sizeof(seq_id));
1231
+ }
1232
+ }
1233
+ }
1234
+ }
1235
+
1236
+ void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const {
1237
+ const uint32_t v_trans = this->v_trans ? 1 : 0;
1238
+ const uint32_t n_layer = layers.size();
1239
+
1240
+ io.write(&v_trans, sizeof(v_trans));
1241
+ io.write(&n_layer, sizeof(n_layer));
1242
+
1243
+ std::vector<uint8_t> tmp_buf;
1244
+
1245
+ // Iterate and write all the keys first, each row is a cell
1246
+ // Get whole range at a time
1247
+ for (const auto & layer : layers) {
1248
+ const uint32_t il = layer.il;
1249
+
1250
+ const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
1251
+
1252
+ // Write key type
1253
+ const int32_t k_type_i = (int32_t)layer.k->type;
1254
+ io.write(&k_type_i, sizeof(k_type_i));
1255
+
1256
+ // Write row size of key
1257
+ const uint64_t k_size_row = ggml_row_size(layer.k->type, n_embd_k_gqa);
1258
+ io.write(&k_size_row, sizeof(k_size_row));
1259
+
1260
+ // Read each range of cells of k_size length each into tmp_buf and write out
1261
+ for (const auto & range : cell_ranges) {
1262
+ const size_t range_size = range.second - range.first;
1263
+ const size_t buf_size = range_size * k_size_row;
1264
+ io.write_tensor(layer.k, range.first * k_size_row, buf_size);
1265
+ }
1266
+ }
1267
+
1268
+ if (!v_trans) {
1269
+ for (const auto & layer : layers) {
1270
+ const uint32_t il = layer.il;
1271
+
1272
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
1273
+
1274
+ // Write value type
1275
+ const int32_t v_type_i = (int32_t)layer.v->type;
1276
+ io.write(&v_type_i, sizeof(v_type_i));
1277
+
1278
+ // Write row size of value
1279
+ const uint64_t v_size_row = ggml_row_size(layer.v->type, n_embd_v_gqa);
1280
+ io.write(&v_size_row, sizeof(v_size_row));
1281
+
1282
+ // Read each range of cells of v_size length each into tmp_buf and write out
1283
+ for (const auto & range : cell_ranges) {
1284
+ const size_t range_size = range.second - range.first;
1285
+ const size_t buf_size = range_size * v_size_row;
1286
+ io.write_tensor(layer.v, range.first * v_size_row, buf_size);
1287
+ }
1288
+ }
1289
+ } else {
1290
+ // When v is transposed, we also need the element size and get the element ranges from each row
1291
+ const uint32_t kv_size = cells.size();
1292
+
1293
+ for (const auto & layer : layers) {
1294
+ const uint32_t il = layer.il;
1295
+
1296
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
1297
+
1298
+ // Write value type
1299
+ const int32_t v_type_i = (int32_t)layer.v->type;
1300
+ io.write(&v_type_i, sizeof(v_type_i));
1301
+
1302
+ // Write element size
1303
+ const uint32_t v_size_el = ggml_type_size(layer.v->type);
1304
+ io.write(&v_size_el, sizeof(v_size_el));
1305
+
1306
+ // Write GQA embedding size
1307
+ io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));
1308
+
1309
+ // For each row, we get the element values of each cell
1310
+ for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
1311
+ // Read each range of cells of v_size_el length each into tmp_buf and write out
1312
+ for (const auto & range : cell_ranges) {
1313
+ const size_t range_size = range.second - range.first;
1314
+ const size_t src_offset = (range.first + j * kv_size) * v_size_el;
1315
+ const size_t buf_size = range_size * v_size_el;
1316
+ io.write_tensor(layer.v, src_offset, buf_size);
1317
+ }
1318
+ }
1319
+ }
1320
+ }
1321
+ }
1322
+
1323
+ bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) {
1324
+ if (dest_seq_id != -1) {
1325
+ // single sequence
1326
+
1327
+ seq_rm(dest_seq_id, -1, -1);
1328
+
1329
+ llama_sbatch sbatch;
1330
+ llama_ubatch batch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false);
1331
+
1332
+ batch.n_tokens = cell_count;
1333
+
1334
+ for (uint32_t i = 0; i < cell_count; ++i) {
1335
+ llama_pos pos;
1336
+ uint32_t n_seq_id;
1337
+
1338
+ io.read_to(&pos, sizeof(pos));
1339
+ io.read_to(&n_seq_id, sizeof(n_seq_id));
1340
+
1341
+ if (n_seq_id != 1) {
1342
+ LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
1343
+ return false;
1344
+ }
1345
+
1346
+ // read the sequence id, but directly discard it - we will use dest_seq_id instead
1347
+ {
1348
+ llama_seq_id seq_id;
1349
+ io.read_to(&seq_id, sizeof(seq_id));
1350
+ }
1351
+
1352
+ batch.pos[i] = pos;
1353
+ batch.n_seq_id[i] = n_seq_id;
1354
+ batch.seq_id[i] = &dest_seq_id;
1355
+ }
1356
+
1357
+ if (!find_slot(batch)) {
1358
+ LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
1359
+ return false;
1360
+ }
1361
+
1362
+ commit();
1363
+
1364
+ // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
1365
+ // Assume that this is one contiguous block of cells
1366
+ GGML_ASSERT(head + cell_count <= cells.size());
1367
+ GGML_ASSERT(cells.pos_get(head) == batch.pos[0]);
1368
+ GGML_ASSERT(cells.pos_get(head + cell_count - 1) == batch.pos[cell_count - 1]);
1369
+ GGML_ASSERT(cells.seq_has(head, dest_seq_id));
1370
+ GGML_ASSERT(cells.seq_has(head + cell_count - 1, dest_seq_id));
1371
+ } else {
1372
+ // whole KV cache restore
1373
+
1374
+ if (cell_count > cells.size()) {
1375
+ LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
1376
+ return false;
1377
+ }
1378
+
1379
+ clear();
1380
+
1381
+ for (uint32_t i = 0; i < cell_count; ++i) {
1382
+ llama_pos pos;
1383
+ uint32_t n_seq_id;
1384
+
1385
+ io.read_to(&pos, sizeof(pos));
1386
+ io.read_to(&n_seq_id, sizeof(n_seq_id));
1387
+
1388
+ cells.pos_set(i, pos);
1389
+
1390
+ for (uint32_t j = 0; j < n_seq_id; ++j) {
1391
+ llama_seq_id seq_id;
1392
+ io.read_to(&seq_id, sizeof(seq_id));
1393
+
1394
+ if (seq_id < 0 || (uint32_t) seq_id >= n_seq_max) {
1395
+ LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, n_seq_max);
1396
+ return false;
1397
+ }
1398
+
1399
+ cells.seq_add(i, seq_id);
1400
+ }
1401
+ }
1402
+
1403
+ head = 0;
1404
+ }
1405
+
1406
+ return true;
1407
+ }
1408
+
1409
+ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell_count) {
1410
+ uint32_t v_trans;
1411
+ uint32_t n_layer;
1412
+
1413
+ io.read_to(&v_trans, sizeof(v_trans));
1414
+ io.read_to(&n_layer, sizeof(n_layer));
1415
+
1416
+ if (n_layer != layers.size()) {
1417
+ LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, (uint32_t) layers.size());
1418
+ return false;
1419
+ }
1420
+ if (cell_count > cells.size()) {
1421
+ LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, cells.size());
1422
+ return false;
1423
+ }
1424
+ if (this->v_trans != (bool) v_trans) {
1425
+ LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__);
1426
+ return false;
1427
+ }
1428
+
1429
+ // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
1430
+ for (const auto & layer : layers) {
1431
+ const uint32_t il = layer.il;
1432
+
1433
+ const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
1434
+
1435
+ // Read type of key
1436
+ int32_t k_type_i_ref;
1437
+ io.read_to(&k_type_i_ref, sizeof(k_type_i_ref));
1438
+ const int32_t k_type_i = (int32_t) layer.k->type;
1439
+ if (k_type_i != k_type_i_ref) {
1440
+ LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il);
1441
+ return false;
1442
+ }
1443
+
1444
+ // Read row size of key
1445
+ uint64_t k_size_row_ref;
1446
+ io.read_to(&k_size_row_ref, sizeof(k_size_row_ref));
1447
+ const size_t k_size_row = ggml_row_size(layer.k->type, n_embd_k_gqa);
1448
+ if (k_size_row != k_size_row_ref) {
1449
+ LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il);
1450
+ return false;
1451
+ }
1452
+
1453
+ if (cell_count) {
1454
+ // Read and set the keys for the whole cell range
1455
+ ggml_backend_tensor_set(layer.k, io.read(cell_count * k_size_row), head * k_size_row, cell_count * k_size_row);
1456
+ }
1457
+ }
1458
+
1459
+ if (!this->v_trans) {
1460
+ for (const auto & layer : layers) {
1461
+ const uint32_t il = layer.il;
1462
+
1463
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
1464
+
1465
+ // Read type of value
1466
+ int32_t v_type_i_ref;
1467
+ io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
1468
+ const int32_t v_type_i = (int32_t)layer.v->type;
1469
+ if (v_type_i != v_type_i_ref) {
1470
+ LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
1471
+ return false;
1472
+ }
1473
+
1474
+ // Read row size of value
1475
+ uint64_t v_size_row_ref;
1476
+ io.read_to(&v_size_row_ref, sizeof(v_size_row_ref));
1477
+ const size_t v_size_row = ggml_row_size(layer.v->type, n_embd_v_gqa);
1478
+ if (v_size_row != v_size_row_ref) {
1479
+ LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il);
1480
+ return false;
1481
+ }
1482
+
1483
+ if (cell_count) {
1484
+ // Read and set the values for the whole cell range
1485
+ ggml_backend_tensor_set(layer.v, io.read(cell_count * v_size_row), head * v_size_row, cell_count * v_size_row);
1486
+ }
1487
+ }
1488
+ } else {
1489
+ // For each layer, read the values for each cell (transposed)
1490
+ for (const auto & layer : layers) {
1491
+ const uint32_t il = layer.il;
1492
+
1493
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
1494
+
1495
+ // Read type of value
1496
+ int32_t v_type_i_ref;
1497
+ io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
1498
+ const int32_t v_type_i = (int32_t)layer.v->type;
1499
+ if (v_type_i != v_type_i_ref) {
1500
+ LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
1501
+ return false;
1502
+ }
1503
+
1504
+ // Read element size of value
1505
+ uint32_t v_size_el_ref;
1506
+ io.read_to(&v_size_el_ref, sizeof(v_size_el_ref));
1507
+ const size_t v_size_el = ggml_type_size(layer.v->type);
1508
+ if (v_size_el != v_size_el_ref) {
1509
+ LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il);
1510
+ return false;
1511
+ }
1512
+
1513
+ // Read GQA embedding size
1514
+ uint32_t n_embd_v_gqa_ref;
1515
+ io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref));
1516
+ if (n_embd_v_gqa != n_embd_v_gqa_ref) {
1517
+ LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il);
1518
+ return false;
1519
+ }
1520
+
1521
+ if (cell_count) {
1522
+ // For each row in the transposed matrix, read the values for the whole cell range
1523
+ for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
1524
+ const size_t dst_offset = (head + j * cells.size()) * v_size_el;
1525
+ ggml_backend_tensor_set(layer.v, io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el);
1526
+ }
1527
+ }
1528
+ }
1529
+ }
1530
+
1531
+ return true;
1532
+ }
1533
+
1534
+ //
1535
+ // llama_kv_cache_unified_iswa
1536
+ //
1537
+
1538
+ llama_kv_cache_unified_iswa::llama_kv_cache_unified_iswa(
1539
+ const llama_model & model,
1540
+ ggml_type type_k,
1541
+ ggml_type type_v,
1542
+ bool v_trans,
1543
+ bool offload,
1544
+ bool swa_full,
1545
+ uint32_t kv_size,
1546
+ uint32_t n_seq_max,
1547
+ uint32_t n_batch,
1548
+ uint32_t n_pad) : hparams(model.hparams) {
1549
+ llama_kv_cache_unified::layer_filter_cb filter_base = [&](int32_t il) { return !model.hparams.is_swa(il); };
1550
+ llama_kv_cache_unified::layer_filter_cb filter_swa = [&](int32_t il) { return model.hparams.is_swa(il); };
1551
+
1552
+ const uint32_t size_base = kv_size;
1553
+
1554
+ uint32_t size_swa = std::min(size_base, GGML_PAD(hparams.n_swa*n_seq_max + n_batch, n_pad));
1555
+
1556
+ // when using full-size SWA cache, we set the SWA cache size to be equal to the base cache size and disable pruning
1557
+ if (swa_full) {
1558
+ LLAMA_LOG_WARN("%s: using full-size SWA cache (ref: %s)\n",
1559
+ __func__, "https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055");
1560
+
1561
+ size_swa = size_base;
1562
+ do_prune = false;
1563
+ }
1564
+
1565
+ LLAMA_LOG_INFO("%s: creating non-SWA KV cache, size = %u cells\n", __func__, size_base);
1566
+
1567
+ kv_base = std::make_unique<llama_kv_cache_unified>(
1568
+ model, std::move(filter_base), type_k, type_v,
1569
+ v_trans, offload, size_base, n_seq_max, n_pad,
1570
+ 0, LLAMA_SWA_TYPE_NONE);
1571
+
1572
+ LLAMA_LOG_INFO("%s: creating SWA KV cache, size = %u cells\n", __func__, size_swa);
1573
+
1574
+ kv_swa = std::make_unique<llama_kv_cache_unified>(
1575
+ model, std::move(filter_swa), type_k, type_v,
1576
+ v_trans, offload, size_swa, n_seq_max, n_pad,
1577
+ hparams.n_swa, hparams.swa_type);
1578
+ }
1579
+
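Illustrative arithmetic for the SWA cache sizing above, with made-up hyperparameters and pad_up standing in for GGML_PAD (not part of the package sources):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// illustrative only - the SWA cache only needs the window per sequence plus one batch
static uint32_t pad_up(uint32_t x, uint32_t p) { return ((x + p - 1)/p)*p; }

int main() {
    const uint32_t size_base = 8192;  // requested kv_size
    const uint32_t n_swa     = 1024;  // sliding-window width
    const uint32_t n_seq_max = 2;
    const uint32_t n_batch   = 512;
    const uint32_t n_pad     = 32;

    const uint32_t size_swa = std::min(size_base, pad_up(n_swa*n_seq_max + n_batch, n_pad));
    std::printf("size_swa = %u\n", size_swa); // 2560
    return 0;
}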
1580
+ void llama_kv_cache_unified_iswa::clear() {
1581
+ kv_base->clear();
1582
+ kv_swa ->clear();
1583
+ }
1584
+
1585
+ bool llama_kv_cache_unified_iswa::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
1586
+ bool res = true;
1587
+
1588
+ res = res & kv_base->seq_rm(seq_id, p0, p1);
1589
+ res = res & kv_swa ->seq_rm(seq_id, p0, p1);
1590
+
1591
+ return res;
1592
+ }
1593
+
1594
+ void llama_kv_cache_unified_iswa::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
1595
+ kv_base->seq_cp(seq_id_src, seq_id_dst, p0, p1);
1596
+ kv_swa ->seq_cp(seq_id_src, seq_id_dst, p0, p1);
1597
+ }
1598
+
1599
+ void llama_kv_cache_unified_iswa::seq_keep(llama_seq_id seq_id) {
1600
+ kv_base->seq_keep(seq_id);
1601
+ kv_swa ->seq_keep(seq_id);
1602
+ }
1603
+
1604
+ void llama_kv_cache_unified_iswa::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
1605
+ kv_base->seq_add(seq_id, p0, p1, shift);
1606
+ kv_swa ->seq_add(seq_id, p0, p1, shift);
1607
+ }
1608
+
1609
+ void llama_kv_cache_unified_iswa::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
1610
+ kv_base->seq_div(seq_id, p0, p1, d);
1611
+ kv_swa ->seq_div(seq_id, p0, p1, d);
1612
+ }
1613
+
1614
+ llama_pos llama_kv_cache_unified_iswa::seq_pos_min(llama_seq_id seq_id) const {
1615
+ // the base cache is a superset of the SWA cache, so we can just check the SWA cache
1616
+ return kv_swa->seq_pos_min(seq_id);
1617
+ }
1618
+
1619
+ llama_pos llama_kv_cache_unified_iswa::seq_pos_max(llama_seq_id seq_id) const {
1620
+ return kv_swa->seq_pos_max(seq_id);
1621
+ }
1622
+
1623
+ void llama_kv_cache_unified_iswa::restore() {
1624
+ kv_base->restore();
1625
+ kv_swa ->restore();
1626
+ }
1627
+
1628
+ void llama_kv_cache_unified_iswa::commit() {
1629
+ kv_base->commit();
1630
+ kv_swa ->commit();
1631
+
1632
+ // slide the attention window, forgetting/pruning old tokens that are outside the window
1633
+ if (do_prune) {
1634
+ for (const auto & [seq_id, entry] : pending.pos) {
1635
+ kv_swa->prune_swa(seq_id, entry.pmin, entry.pmax);
1636
+ }
1637
+
1638
+ }
1639
+
1640
+ pending.clear();
1641
+ }
1642
+
1643
+ bool llama_kv_cache_unified_iswa::update(llama_context & lctx) {
1644
+ bool res = true;
1645
+
1646
+ res = res & kv_base->update(lctx);
1647
+ res = res & kv_swa ->update(lctx);
1648
+
1649
+ return res;
1650
+ }
1651
+
1652
+ void llama_kv_cache_unified_iswa::defrag_sched(float thold) {
1653
+ kv_base->defrag_sched(thold);
1654
+ kv_swa ->defrag_sched(thold);
1655
+ }
1656
+
1657
+ void llama_kv_cache_unified_iswa::set_full() {
1658
+ kv_base->set_full();
1659
+ kv_swa ->set_full();
1660
+ }
1661
+
1662
+ llama_sbatch llama_kv_cache_unified_iswa::sbatch_init(const llama_batch & batch, bool logits_all) {
1663
+ pending.clear();
1664
+
1665
+ if (do_prune) {
1666
+ for (int i = 0; i < batch.n_tokens; ++i) {
1667
+ for (int s = 0; s < batch.n_seq_id[i]; ++s) {
1668
+ const llama_seq_id seq_id = batch.seq_id[i][s];
1669
+ const llama_pos pos = batch.pos[i];
1670
+
1671
+ if (pending.pos.find(seq_id) == pending.pos.end()) {
1672
+ pending.pos[seq_id].pmin = pos;
1673
+ pending.pos[seq_id].pmax = pos;
1674
+ } else {
1675
+ pending.pos[seq_id].pmin = std::min(pending.pos[seq_id].pmin, pos);
1676
+ pending.pos[seq_id].pmax = std::max(pending.pos[seq_id].pmax, pos);
1677
+ }
1678
+ }
1679
+ }
1680
+ }
1681
+
1682
+ return llama_sbatch(batch, hparams.n_embd, true, logits_all);
1683
+ }
1684
+
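A toy version of the pending position tracking above, which records the per-sequence [pmin, pmax] range later handed to prune_swa() by commit() (illustrative only, with made-up batch contents):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <map>

// illustrative only - per sequence, remember the smallest and largest position seen in the batch
struct pos_range { int32_t pmin, pmax; };

int main() {
    std::map<int32_t, pos_range> pending;
    const int32_t seq[4] = {0, 0, 1, 0};
    const int32_t pos[4] = {7, 8, 3, 9};
    for (int i = 0; i < 4; ++i) {
        auto it = pending.find(seq[i]);
        if (it == pending.end()) {
            pending[seq[i]] = {pos[i], pos[i]};
        } else {
            it->second.pmin = std::min(it->second.pmin, pos[i]);
            it->second.pmax = std::max(it->second.pmax, pos[i]);
        }
    }
    for (const auto & [seq_id, r] : pending) {
        std::printf("seq %d: [%d, %d]\n", seq_id, r.pmin, r.pmax); // seq 0: [7, 9], seq 1: [3, 3]
    }
    return 0;
}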
1685
+ llama_ubatch llama_kv_cache_unified_iswa::ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const {
1686
+ GGML_UNUSED(embd_pooled);
1687
+ return sbatch.split_simple(n_ubatch);
1688
+ }
1689
+
1690
+ bool llama_kv_cache_unified_iswa::find_slot(const llama_ubatch & batch) {
1691
+ bool res = true;
1692
+
1693
+ res = res & kv_base->find_slot(batch);
1694
+ res = res & kv_swa ->find_slot(batch);
1695
+
1696
+ return res;
1697
+ }
1698
+
1699
+ bool llama_kv_cache_unified_iswa::get_can_shift() const {
1700
+ return kv_base->get_size() == kv_swa->get_size();
1701
+ }
1702
+
1703
+ void llama_kv_cache_unified_iswa::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
1704
+ kv_base->state_write(io, seq_id);
1705
+ kv_swa ->state_write(io, seq_id);
1706
+ }
1707
+
1708
+ void llama_kv_cache_unified_iswa::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
1709
+ kv_base->state_read(io, seq_id);
1710
+ kv_swa ->state_read(io, seq_id);
1711
+ }
1712
+
1713
+ llama_kv_cache_unified * llama_kv_cache_unified_iswa::get_kv_base() const {
1714
+ return kv_base.get();
1715
+ }
1716
+
1717
+ llama_kv_cache_unified * llama_kv_cache_unified_iswa::get_kv_swa() const {
1718
+ return kv_swa.get();
1719
+ }
1720
+
1721
+ //
1722
+ // llama_kv_cache_recurrent
1723
+ //
1724
+
1725
+ llama_kv_cache_recurrent::llama_kv_cache_recurrent(
1726
+ const llama_model & model,
1727
+ ggml_type type_k,
1728
+ ggml_type type_v,
1729
+ bool offload,
1730
+ uint32_t kv_size,
1731
+ uint32_t n_seq_max) : hparams(model.hparams), n_seq_max(n_seq_max) {
1732
+ const int32_t n_layer = hparams.n_layer;
1733
+
1734
+ LLAMA_LOG_INFO("%s: kv_size = %u, n_seq_max = %u, type_k = '%s', type_v = '%s', n_layer = %d\n",
1735
+ __func__, kv_size, n_seq_max, ggml_type_name(type_k), ggml_type_name(type_v), n_layer);
1736
+
1737
+ head = 0;
1738
+ size = kv_size;
1739
+ used = 0;
1740
+
1741
+ cells.clear();
1742
+ cells.resize(kv_size);
1743
+
1744
+ // create a context for each buffer type
1745
+ std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
1746
+ auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
1747
+ auto it = ctx_map.find(buft);
1748
+ if (it == ctx_map.end()) {
1749
+ ggml_init_params params = {
1750
+ /*.mem_size =*/ size_t(2u*n_layer*ggml_tensor_overhead()),
1751
+ /*.mem_buffer =*/ NULL,
1752
+ /*.no_alloc =*/ true,
1753
+ };
1754
+
1755
+ ggml_context * ctx = ggml_init(params);
1756
+ if (!ctx) {
1757
+ return nullptr;
1758
+ }
1759
+
1760
+ ctx_map[buft] = ctx;
1761
+ ctxs.emplace_back(ctx);
1762
+
1763
+ return ctx;
1764
+ }
1765
+
1766
+ return it->second;
1767
+ };
1768
+
1769
+ k_l.reserve(n_layer);
1770
+ v_l.reserve(n_layer);
1771
+
1772
+ for (int i = 0; i < n_layer; i++) {
1773
+ const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
1774
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();
1775
+
1776
+ const char * dev_name = "CPU";
1777
+
1778
+ ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();
1779
+
1780
+ if (offload) {
1781
+ auto * dev = model.dev_layer(i);
1782
+ buft = ggml_backend_dev_buffer_type(dev);
1783
+
1784
+ dev_name = ggml_backend_dev_name(dev);
1785
+ }
1786
+
1787
+ LLAMA_LOG_DEBUG("%s, layer %3d: dev = %s\n", __func__, i, dev_name);
1788
+
1789
+ ggml_context * ctx = ctx_for_buft(buft);
1790
+ if (!ctx) {
1791
+ throw std::runtime_error("failed to create ggml context for kv cache");
1792
+ }
1793
+
1794
+ ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size);
1795
+ ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size);
1796
+ ggml_format_name(k, "cache_k_l%d", i);
1797
+ ggml_format_name(v, "cache_v_l%d", i);
1798
+ k_l.push_back(k);
1799
+ v_l.push_back(v);
1800
+ }
1801
+
1802
+ // allocate tensors and initialize the buffers to avoid NaNs in the padding
1803
+ for (auto it : ctx_map) {
1804
+ auto * buft = it.first;
1805
+ auto * ctx = it.second;
1806
+
1807
+ ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
1808
+ if (!buf) {
1809
+ throw std::runtime_error("failed to allocate buffer for kv cache");
1810
+ }
1811
+ ggml_backend_buffer_clear(buf, 0);
1812
+ LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
1813
+ bufs.emplace_back(buf);
1814
+ }
1815
+
1816
+ {
1817
+ const size_t memory_size_k = size_k_bytes();
1818
+ const size_t memory_size_v = size_v_bytes();
1819
+
1820
+ LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
1821
+ (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
1822
+ ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
1823
+ ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
1824
+ }
1825
+ }
1826
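The constructor above declares the per-layer K/V tensors in no-alloc ggml contexts grouped by backend buffer type, then allocates one backend buffer per context and clears it so the padding never contains NaNs. The following is a minimal standalone sketch of that pattern, reduced to the CPU buffer type, a single layer, and made-up tensor sizes; the ggml calls are the same ones used above, and the header names follow the vendored ggml sources:

```cpp
#include <stdexcept>

#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

// Sketch: declare two 1-D cache tensors in a metadata-only context, then
// allocate and zero a single backend buffer that holds both of them.
static ggml_backend_buffer_t alloc_toy_cache(ggml_context ** out_ctx) {
    ggml_init_params params = {
        /*.mem_size   =*/ 2*ggml_tensor_overhead(), // room for 2 tensor headers
        /*.mem_buffer =*/ nullptr,
        /*.no_alloc   =*/ true,                     // data lives in the backend buffer
    };

    ggml_context * ctx = ggml_init(params);
    if (!ctx) {
        throw std::runtime_error("failed to create ggml context");
    }

    ggml_tensor * k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1024); // sizes are arbitrary
    ggml_tensor * v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 1024);
    ggml_format_name(k, "cache_k_l%d", 0);
    ggml_format_name(v, "cache_v_l%d", 0);

    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_cpu_buffer_type());
    if (!buf) {
        ggml_free(ctx);
        throw std::runtime_error("failed to allocate cache buffer");
    }
    ggml_backend_buffer_clear(buf, 0); // avoid NaNs in the padding, as above

    *out_ctx = ctx;
    return buf;
}
```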
+
1827
+ void llama_kv_cache_recurrent::clear() {
1828
+ for (int32_t i = 0; i < (int32_t) size; ++i) {
1829
+ cells[i].pos = -1;
1830
+ cells[i].seq_id.clear();
1831
+ cells[i].src = -1;
1832
+ cells[i].tail = -1;
1833
+ }
1834
+ head = 0;
1835
+ used = 0;
1836
+
1837
+ for (auto & buf : bufs) {
1838
+ ggml_backend_buffer_clear(buf.get(), 0);
1839
+ }
1840
+ }
1841
+
1842
+ bool llama_kv_cache_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
1843
+ uint32_t new_head = size;
1844
+
1845
+ if (p0 < 0) {
1846
+ p0 = 0;
1847
+ }
1848
+
1849
+ if (p1 < 0) {
1850
+ p1 = std::numeric_limits<llama_pos>::max();
1851
+ }
1852
+
1853
+ // models like Mamba or RWKV can't have a state partially erased
1854
+ if (seq_id >= (int64_t) size) {
1855
+ // could be fatal
1856
+ return false;
1857
+ }
1858
+ if (0 <= seq_id) {
1859
+ int32_t & tail_id = cells[seq_id].tail;
1860
+ if (tail_id >= 0) {
1861
+ const kv_cell & cell = cells[tail_id];
1862
+ // partial intersection is invalid
1863
+ if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) {
1864
+ return false;
1865
+ }
1866
+ // invalidate tails which will be cleared
1867
+ if (p0 <= cell.pos && cell.pos < p1) {
1868
+ tail_id = -1;
1869
+ }
1870
+ }
1871
+ } else {
1872
+             // when the seq_id is negative, the range should include everything or nothing
1873
+ if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits<llama_pos>::max())) {
1874
+ return false;
1875
+ }
1876
+ }
1877
+
1878
+ for (uint32_t i = 0; i < size; ++i) {
1879
+ if (cells[i].pos >= p0 && cells[i].pos < p1) {
1880
+ if (seq_id < 0) {
1881
+ cells[i].seq_id.clear();
1882
+ } else if (cells[i].has_seq_id(seq_id)) {
1883
+ cells[i].seq_id.erase(seq_id);
1884
+ } else {
1885
+ continue;
1886
+ }
1887
+ if (cells[i].is_empty()) {
1888
+ // keep count of the number of used cells
1889
+ if (cells[i].pos >= 0) {
1890
+ used--;
1891
+ }
1892
+ cells[i].pos = -1;
1893
+ cells[i].src = -1;
1894
+ if (new_head == size) {
1895
+ new_head = i;
1896
+ }
1897
+ }
1898
+ }
1899
+ }
1900
+
1901
+ // If we freed up a slot, set head to it so searching can start there.
1902
+ if (new_head != size && new_head < head) {
1903
+ head = new_head;
1904
+ }
1905
+
1906
+ return true;
1907
+ }
1908
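The check in seq_rm() above encodes the rule stated in the comment: a recurrent state is erased completely or not at all, so a [p0, p1) range that would cut into the tail cell's position is rejected. The same test, isolated as a tiny predicate for illustration:

```cpp
#include <cstdint>

// Sketch of the "partial intersection is invalid" rule: with p0/p1 already
// clamped to be non-negative, removal is rejected whenever the range would
// erase only part of the state held at position `pos`.
static bool removal_allowed(int64_t pos, int64_t p0, int64_t p1) {
    const bool partial = (0 < p0 && p0 <= pos) || (0 < p1 && p1 <= pos);
    return !partial;
}
```

So, for pos > 0, removal_allowed(pos, 0, some_very_large_p1) holds (a full wipe starting at position 0), while removal_allowed(pos, pos, pos + 1) does not, since dropping just the newest position of a state that summarizes the whole prefix is not representable.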
+
1909
+ void llama_kv_cache_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
1910
+ if (seq_id_src == seq_id_dst) {
1911
+ return;
1912
+ }
1913
+
1914
+ if (p0 < 0) {
1915
+ p0 = 0;
1916
+ }
1917
+
1918
+ if (p1 < 0) {
1919
+ p1 = std::numeric_limits<llama_pos>::max();
1920
+ }
1921
+
1922
+ if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) {
1923
+ kv_cell & tail_src = cells[seq_id_src];
1924
+ kv_cell & tail_dst = cells[seq_id_dst];
1925
+ if (tail_dst.tail >= 0) {
1926
+ // clear destination seq_id if it wasn't empty
1927
+ kv_cell & cell_dst = cells[tail_dst.tail];
1928
+
1929
+ cell_dst.seq_id.erase(seq_id_dst);
1930
+ tail_dst.tail = -1;
1931
+ if (cell_dst.seq_id.empty()) {
1932
+ cell_dst.pos = -1;
1933
+ cell_dst.src = -1;
1934
+ used -= 1;
1935
+ }
1936
+ }
1937
+ if (tail_src.tail >= 0) {
1938
+ kv_cell & cell_src = cells[tail_src.tail];
1939
+
1940
+ cell_src.seq_id.insert(seq_id_dst);
1941
+ tail_dst.tail = tail_src.tail;
1942
+ }
1943
+ }
1944
+ }
1945
+
1946
+ void llama_kv_cache_recurrent::seq_keep(llama_seq_id seq_id) {
1947
+ uint32_t new_head = size;
1948
+
1949
+ for (uint32_t i = 0; i < size; ++i) {
1950
+ if ((llama_seq_id) i != seq_id) {
1951
+ cells[i].tail = -1;
1952
+ }
1953
+
1954
+ if (!cells[i].has_seq_id(seq_id)) {
1955
+ if (cells[i].pos >= 0) {
1956
+ used--;
1957
+ }
1958
+
1959
+ cells[i].pos = -1;
1960
+ cells[i].src = -1;
1961
+ cells[i].seq_id.clear();
1962
+
1963
+ if (new_head == size){
1964
+ new_head = i;
1965
+ }
1966
+ } else {
1967
+ cells[i].seq_id.clear();
1968
+ cells[i].seq_id.insert(seq_id);
1969
+ }
1970
+ }
1971
+
1972
+ // If we freed up a slot, set head to it so searching can start there.
1973
+ if (new_head != size && new_head < head) {
1974
+ head = new_head;
1975
+ }
1976
+ }
1977
+
1978
+ void llama_kv_cache_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
1979
+ if (shift == 0) {
1980
+ return;
1981
+ }
1982
+
1983
+ if (p0 < 0) {
1984
+ p0 = 0;
1985
+ }
1986
+
1987
+ if (p1 < 0) {
1988
+ p1 = std::numeric_limits<llama_pos>::max();
1989
+ }
1990
+
1991
+         // If there is no range then return early to avoid looping over the cache.
1992
+ if (p0 == p1) {
1993
+ return;
1994
+ }
1995
+
1996
+ // for Mamba-like or RWKV models, only the pos needs to be shifted
1997
+ if (0 <= seq_id && seq_id < (int64_t) size) {
1998
+ const int32_t tail_id = cells[seq_id].tail;
1999
+ if (tail_id >= 0) {
2000
+ kv_cell & cell = cells[tail_id];
2001
+ if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
2002
+ cell.pos += shift;
2003
+ }
2004
+ }
2005
+ }
2006
+ }
2007
+
2008
+ void llama_kv_cache_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
2009
+ if (d == 1) {
2010
+ return;
2011
+ }
2012
+
2013
+ if (p0 < 0) {
2014
+ p0 = 0;
2015
+ }
2016
+
2017
+ if (p1 < 0) {
2018
+ p1 = std::numeric_limits<llama_pos>::max();
2019
+ }
2020
+
2021
+ // If there is no range then return early to avoid looping over the cache.
2022
+ if (p0 == p1) {
2023
+ return;
2024
+ }
2025
+
2026
+ // for Mamba-like or RWKV models, only the pos needs to be changed
2027
+ if (0 <= seq_id && seq_id < (int64_t) size) {
2028
+ const int32_t tail_id = cells[seq_id].tail;
2029
+ if (tail_id >= 0) {
2030
+ kv_cell & cell = cells[tail_id];
2031
+ if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
2032
+ cell.pos /= d;
2033
+ }
2034
+ }
2035
+ }
2036
+ }
2037
+
2038
+ llama_pos llama_kv_cache_recurrent::seq_pos_min(llama_seq_id seq_id) const {
2039
+ llama_pos result = std::numeric_limits<llama_pos>::max();
2040
+
2041
+ for (uint32_t i = 0; i < size; ++i) {
2042
+ if (cells[i].has_seq_id(seq_id)) {
2043
+ result = std::min(result, cells[i].pos);
2044
+ }
2045
+ }
2046
+
2047
+ if (result == std::numeric_limits<llama_pos>::max()) {
2048
+ result = -1;
2049
+ }
2050
+
2051
+ return result;
2052
+ }
2053
+
2054
+ llama_pos llama_kv_cache_recurrent::seq_pos_max(llama_seq_id seq_id) const {
2055
+ llama_pos result = -1;
2056
+
2057
+ for (uint32_t i = 0; i < size; ++i) {
2058
+ if (cells[i].has_seq_id(seq_id)) {
2059
+ result = std::max(result, cells[i].pos);
2060
+ }
2061
+ }
2062
+
2063
+ return result;
2064
+ }
2065
+
2066
+ void llama_kv_cache_recurrent::restore() {
2067
+ if (pending.ranges.empty()) {
2068
+ return;
2069
+ }
2070
+
2071
+ seq_rm(-1, -1, -1);
2072
+ }
2073
+
2074
+ void llama_kv_cache_recurrent::commit() {
2075
+ pending.ranges.clear();
2076
+ }
2077
+
2078
+ bool llama_kv_cache_recurrent::update(llama_context & ctx) {
2079
+ GGML_UNUSED(ctx);
2080
+ return false;
2081
+ }
2082
+
2083
+ void llama_kv_cache_recurrent::defrag_sched(float thold) {
2084
+ GGML_UNUSED(thold);
2085
+ // noop
2086
+ }
2087
+
2088
+ void llama_kv_cache_recurrent::set_full() {
2089
+ n = size;
2090
+ head = 0;
2091
+ }
2092
+
2093
+ llama_sbatch llama_kv_cache_recurrent::sbatch_init(
2094
+ const llama_batch & batch,
2095
+ bool logits_all) {
2096
+ return llama_sbatch(batch, hparams.n_embd, false, logits_all);
2097
+ }
2098
+
2099
+ llama_ubatch llama_kv_cache_recurrent::ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const {
2100
+ if (embd_pooled) {
2101
+ // Pooled embeddings cannot be split across ubatches (yet)
2102
+ return sbatch.split_seq(n_ubatch);
2103
+ }
2104
+
2105
+ return sbatch.split_equal(n_ubatch);
2106
+ }
2107
+
2108
+ bool llama_kv_cache_recurrent::find_slot(
2109
+ const llama_ubatch & ubatch) {
2110
+ const uint32_t n_tokens = ubatch.n_tokens;
2111
+ const uint32_t n_seqs = ubatch.n_seqs;
2112
+
2113
+ const uint32_t n_seq_tokens = ubatch.n_seq_tokens;
2114
+
2115
+ // if we have enough unused cells before the current head ->
2116
+ // better to start searching from the beginning of the cache, hoping to fill it
2117
+ if (head > used + 2*n_tokens) {
2118
+ head = 0;
2119
+ }
2120
+
2121
+ // For recurrent state architectures (like Mamba or RWKV),
2122
+ // each cache cell can store the state for a whole sequence.
2123
+         // A slot should always be contiguous.
2124
+
2125
+ // can only process batches with an equal number of new tokens in each sequence
2126
+ GGML_ASSERT(ubatch.equal_seqs);
2127
+
2128
+ int32_t min = size - 1;
2129
+ int32_t max = 0;
2130
+
2131
+ // everything should fit if all seq_ids are smaller than the max
2132
+ for (uint32_t s = 0; s < n_seqs; ++s) {
2133
+ const uint32_t n_seq_id = ubatch.n_seq_id[s];
2134
+ for (uint32_t j = 0; j < n_seq_id; ++j) {
2135
+ const llama_seq_id seq_id = ubatch.seq_id[s][j];
2136
+
2137
+ if (seq_id < 0 || (uint32_t) seq_id >= size) {
2138
+ // too big seq_id
2139
+ // TODO: would it be possible to resize the cache instead?
2140
+ LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%u Try using a bigger --parallel value\n", __func__, seq_id, n_seq_max);
2141
+ return false;
2142
+ }
2143
+ if (j > 0) {
2144
+ kv_cell & seq = cells[seq_id];
2145
+ if (seq.tail >= 0) {
2146
+ kv_cell & cell = cells[seq.tail];
2147
+ // clear cells from seq_ids that become shared
2148
+ // (should not normally happen, but let's handle it anyway)
2149
+ cell.seq_id.erase(seq_id);
2150
+ seq.tail = -1;
2151
+ if (cell.seq_id.empty()) {
2152
+ cell.pos = -1;
2153
+ cell.src = -1;
2154
+ used -= 1;
2155
+ }
2156
+ }
2157
+ }
2158
+ }
2159
+ }
2160
+
2161
+ #ifndef NDEBUG
2162
+ {
2163
+ std::vector<int32_t> tails_verif;
2164
+ tails_verif.assign(size, -1);
2165
+ for (uint32_t i = 0; i < size; ++i) {
2166
+ kv_cell & cell = cells[i];
2167
+ for (llama_seq_id seq_id : cell.seq_id) {
2168
+ if (tails_verif[seq_id] != -1) {
2169
+ LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]);
2170
+ }
2171
+ tails_verif[seq_id] = i;
2172
+ }
2173
+ }
2174
+ for (uint32_t i = 0; i < size; ++i) {
2175
+ if (tails_verif[i] != cells[i].tail) {
2176
+ LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cells[i].tail, tails_verif[i]);
2177
+ }
2178
+ }
2179
+ }
2180
+ #endif
2181
+
2182
+ // find next empty cell
2183
+ uint32_t next_empty_cell = head;
2184
+
2185
+ for (uint32_t i = 0; i < size; ++i) {
2186
+ if (next_empty_cell >= size) { next_empty_cell -= size; }
2187
+ kv_cell & cell = cells[next_empty_cell];
2188
+ if (cell.is_empty()) { break; }
2189
+ next_empty_cell += 1;
2190
+ }
2191
+
2192
+ // find usable cell range
2193
+ for (uint32_t s = 0; s < n_seqs; ++s) {
2194
+ const llama_seq_id seq_id = ubatch.seq_id[s][0];
2195
+ kv_cell & seq_meta = cells[seq_id];
2196
+ bool has_cell = false;
2197
+ if (seq_meta.tail >= 0) {
2198
+ kv_cell & cell = cells[seq_meta.tail];
2199
+ GGML_ASSERT(cell.has_seq_id(seq_id));
2200
+ // does this seq_id "own" the cell?
2201
+ if (cell.seq_id.size() == 1) { has_cell = true; }
2202
+ }
2203
+ if (!has_cell) {
2204
+ kv_cell & empty_cell = cells[next_empty_cell];
2205
+ GGML_ASSERT(empty_cell.is_empty());
2206
+ // copy old tail into the empty cell
2207
+ if (seq_meta.tail >= 0) {
2208
+ kv_cell & orig_cell = cells[seq_meta.tail];
2209
+ empty_cell.pos = orig_cell.pos;
2210
+ empty_cell.src = orig_cell.src;
2211
+ orig_cell.seq_id.erase(seq_id);
2212
+ empty_cell.seq_id.insert(seq_id); // will be overwritten
2213
+ }
2214
+ seq_meta.tail = next_empty_cell;
2215
+ // find next empty cell
2216
+ if (s + 1 < n_seqs) {
2217
+ next_empty_cell += 1;
2218
+ for (uint32_t i = 0; i < size; ++i) {
2219
+ if (next_empty_cell >= size) { next_empty_cell -= size; }
2220
+ kv_cell & cell = cells[next_empty_cell];
2221
+ if (cell.is_empty()) { break; }
2222
+ next_empty_cell += 1;
2223
+ }
2224
+ }
2225
+ }
2226
+ if (min > seq_meta.tail) { min = seq_meta.tail; }
2227
+ if (max < seq_meta.tail) { max = seq_meta.tail; }
2228
+ }
2229
+
2230
+ // gather and re-order
2231
+ for (uint32_t s = 0; s < n_seqs; ++s) {
2232
+ int32_t dst_id = s + min;
2233
+ int32_t src_id = cells[ubatch.seq_id[s][0]].tail;
2234
+ if (dst_id != src_id) {
2235
+ kv_cell & dst_cell = cells[dst_id];
2236
+ kv_cell & src_cell = cells[src_id];
2237
+
2238
+ std::swap(dst_cell.pos, src_cell.pos);
2239
+ std::swap(dst_cell.src, src_cell.src);
2240
+ std::swap(dst_cell.seq_id, src_cell.seq_id);
2241
+
2242
+ // swap tails (assuming they NEVER overlap)
2243
+ for (const llama_seq_id seq_id : src_cell.seq_id) {
2244
+ cells[seq_id].tail = src_id;
2245
+ }
2246
+ for (const llama_seq_id seq_id : dst_cell.seq_id) {
2247
+ cells[seq_id].tail = dst_id;
2248
+ }
2249
+ }
2250
+ }
2251
+
2252
+ // update the pos of the used seqs
2253
+ for (uint32_t s = 0; s < n_seqs; ++s) {
2254
+ const llama_pos last_pos = ubatch.pos[n_seq_tokens * s + n_seq_tokens - 1];
2255
+ int32_t cell_id = s + min;
2256
+ kv_cell & cell = cells[cell_id];
2257
+
2258
+ if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) {
2259
+ // What should happen when the pos backtracks or skips a value?
2260
+ // Clearing the state mid-batch would require special-casing which isn't done.
2261
+ LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
2262
+ __func__, last_pos, cell.pos, ubatch.seq_id[s][0], n_seq_tokens);
2263
+ }
2264
+ cell.pos = last_pos;
2265
+ cell.seq_id.clear();
2266
+ for (int32_t j = 0; j < ubatch.n_seq_id[s]; ++j) {
2267
+ const llama_seq_id seq_id = ubatch.seq_id[s][j];
2268
+ cell.seq_id.insert(seq_id);
2269
+ cells[seq_id].tail = cell_id;
2270
+ }
2271
+ }
2272
+
2273
+ // allow getting the range of used cells, from head to head + n
2274
+ head = min;
2275
+ n = max - min + 1;
2276
+ used = std::count_if(cells.begin(), cells.end(),
2277
+ [](const kv_cell & cell){ return !cell.is_empty(); });
2278
+
2279
+ // sanity check
2280
+ return n >= n_seqs;
2281
+ }
2282
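The "gather and re-order" step of find_slot() above swaps the cells used by the current ubatch into one contiguous window, so that head and n can describe them as a single range afterwards. Below is a toy model of just that swap, including the back-pointer fix-up, under simplified types (one cell per sequence, with tail[seq_id] pointing at it):

```cpp
#include <cstdint>
#include <set>
#include <utility>
#include <vector>

// Toy model of the gather step: move each batch sequence's cell into the
// contiguous window [min, min + n_seqs) and keep the tail back-pointers
// of every affected sequence consistent, mirroring the swaps above.
struct toy_cell {
    int64_t pos = -1;
    std::set<int32_t> seq_id;
};

static void gather_contiguous(std::vector<toy_cell> & cells,
                              std::vector<int32_t> & tail,
                              const std::vector<int32_t> & batch_seqs,
                              int32_t min) {
    for (size_t s = 0; s < batch_seqs.size(); ++s) {
        const int32_t dst_id = (int32_t) s + min;
        const int32_t src_id = tail[batch_seqs[s]];
        if (dst_id == src_id) {
            continue;
        }
        std::swap(cells[dst_id], cells[src_id]);
        // whatever ended up in src_id/dst_id, repoint its sequences' tails
        for (int32_t sid : cells[src_id].seq_id) { tail[sid] = src_id; }
        for (int32_t sid : cells[dst_id].seq_id) { tail[sid] = dst_id; }
    }
}
```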
+
2283
+ bool llama_kv_cache_recurrent::get_can_shift() const {
2284
+ return false;
2285
+ }
2286
+
2287
+ int32_t llama_kv_cache_recurrent::s_copy(int i) const {
2288
+ const uint32_t cell_id = i + head;
2289
+
2290
+ //////////////////////////////////////////////
2291
+ // TODO: this should not mutate the KV cache !
2292
+ kv_cell & cell = const_cast<kv_cell &>(cells[cell_id]);
2293
+
2294
+ // prevent out-of-bound sources
2295
+ if (cell.src < 0 || (uint32_t) cell.src >= size) {
2296
+ cell.src = cell_id;
2297
+ }
2298
+
2299
+ int32_t res = cell.src;
2300
+
2301
+ // TODO: do not mutate the KV cache
2302
+ // ensure copy only happens once
2303
+ if (cell.src != (int32_t) cell_id) {
2304
+ cell.src = cell_id;
2305
+ }
2306
+
2307
+ return res;
2308
+ }
2309
+
2310
+ float llama_kv_cache_recurrent::s_mask(int i) const {
2311
+ const uint32_t cell_id = i + head;
2312
+
2313
+ //////////////////////////////////////////////
2314
+ // TODO: this should not mutate the KV cache !
2315
+ kv_cell & cell = const_cast<kv_cell &>(cells[cell_id]);
2316
+
2317
+ float res = (float) (cell.src >= 0);
2318
+
2319
+ // only clear once
2320
+ if (cell.src < 0) {
2321
+ cell.src = cell_id;
2322
+ }
2323
+
2324
+ return res;
2325
+ }
2326
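s_copy() and s_mask() expose, for each cell in the active window, which cell its recurrent state should be copied from and whether that state is valid at all; resetting src afterwards makes the copy (or the clear) happen only once. As a sketch of how a caller might materialize those values into plain per-cell vectors for a window of n cells (the helper and type names here are hypothetical, not part of the cache's API):

```cpp
#include <cstdint>
#include <vector>

struct state_inputs {
    std::vector<int32_t> copy_src; // cell index to copy the recurrent state from
    std::vector<float>   mask;     // 1.0f when the state is valid, 0.0f when it must be cleared
};

// Hypothetical helper: src[head + i] < 0 means cell head + i has no usable
// state yet, so it copies from itself and gets a zero mask.
static state_inputs build_state_inputs(const std::vector<int32_t> & src, uint32_t head, uint32_t n) {
    state_inputs out;
    out.copy_src.reserve(n);
    out.mask.reserve(n);
    for (uint32_t i = 0; i < n; ++i) {
        const int32_t s = src[head + i];
        out.copy_src.push_back(s >= 0 ? s : (int32_t) (head + i));
        out.mask.push_back(s >= 0 ? 1.0f : 0.0f);
    }
    return out;
}
```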
+
2327
+ uint32_t llama_kv_cache_recurrent::cell_max() const {
2328
+ for (uint32_t i = size; i > 0; --i) {
2329
+ const kv_cell & cell = cells[i - 1];
2330
+
2331
+ if (cell.pos >= 0 && !cell.is_empty()) {
2332
+ return i;
2333
+ }
2334
+ }
2335
+
2336
+ return 0;
2337
+ }
2338
+
2339
+ size_t llama_kv_cache_recurrent::total_size() const {
2340
+ size_t size = 0;
2341
+ for (const auto & buf : bufs) {
2342
+ size += ggml_backend_buffer_get_size(buf.get());
2343
+ }
2344
+
2345
+ return size;
2346
+ }
2347
+
2348
+ size_t llama_kv_cache_recurrent::size_k_bytes() const {
2349
+ size_t size_k_bytes = 0;
2350
+
2351
+ for (const auto & k : k_l) {
2352
+ size_k_bytes += ggml_nbytes(k);
2353
+ }
2354
+
2355
+ return size_k_bytes;
2356
+ }
2357
+
2358
+ size_t llama_kv_cache_recurrent::size_v_bytes() const {
2359
+ size_t size_v_bytes = 0;
2360
+
2361
+ for (const auto & v : v_l) {
2362
+ size_v_bytes += ggml_nbytes(v);
2363
+ }
2364
+
2365
+ return size_v_bytes;
2366
+ }
2367
+
2368
+ void llama_kv_cache_recurrent::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
2369
+ std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
2370
+ uint32_t cell_count = 0;
2371
+
2372
+ // Count the number of cells with the specified seq_id
2373
+ // Find all the ranges of cells with this seq id (or all, when -1)
2374
+ uint32_t cell_range_begin = size;
2375
+ for (uint32_t i = 0; i < size; ++i) {
2376
+ const auto & cell = cells[i];
2377
+ if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) {
2378
+ ++cell_count;
2379
+ if (cell_range_begin == size) {
2380
+ cell_range_begin = i;
2381
+ }
2382
+ } else {
2383
+ if (cell_range_begin != size) {
2384
+ cell_ranges.emplace_back(cell_range_begin, i);
2385
+ cell_range_begin = size;
2386
+ }
2387
+ }
2388
+ }
2389
+ if (cell_range_begin != size) {
2390
+ cell_ranges.emplace_back(cell_range_begin, size);
2391
+ }
2392
+
2393
+ // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
2394
+ uint32_t cell_count_check = 0;
2395
+ for (const auto & range : cell_ranges) {
2396
+ cell_count_check += range.second - range.first;
2397
+ }
2398
+ GGML_ASSERT(cell_count == cell_count_check);
2399
+
2400
+ io.write(&cell_count, sizeof(cell_count));
2401
+
2402
+ state_write_meta(io, cell_ranges, seq_id);
2403
+ state_write_data(io, cell_ranges);
2404
+ }
2405
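state_write() first collapses the cells that belong to seq_id (or all non-empty cells when seq_id is -1) into maximal contiguous [first, second) index ranges, and only then writes metadata and K/V rows range by range. The range-building sweep is generic enough to sketch on its own; the predicate argument stands in for the seq_id test above:

```cpp
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

// Sketch: collect maximal runs of indices i in [0, size) where keep(i) holds,
// as half-open ranges, the same shape as the cell_ranges vector built above.
static std::vector<std::pair<uint32_t, uint32_t>>
collect_ranges(uint32_t size, const std::function<bool(uint32_t)> & keep) {
    std::vector<std::pair<uint32_t, uint32_t>> ranges;
    uint32_t begin = size; // sentinel: no range currently open
    for (uint32_t i = 0; i < size; ++i) {
        if (keep(i)) {
            if (begin == size) {
                begin = i;
            }
        } else if (begin != size) {
            ranges.emplace_back(begin, i);
            begin = size;
        }
    }
    if (begin != size) {
        ranges.emplace_back(begin, size);
    }
    return ranges;
}
```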
+
2406
+ void llama_kv_cache_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
2407
+ uint32_t cell_count;
2408
+ io.read_to(&cell_count, sizeof(cell_count));
2409
+
2410
+ bool res = true;
2411
+
2412
+ res = res && state_read_meta(io, cell_count, seq_id);
2413
+ res = res && state_read_data(io, cell_count);
2414
+
2415
+ if (!res) {
2416
+ if (seq_id == -1) {
2417
+ clear();
2418
+ } else {
2419
+ seq_rm(seq_id, -1, -1);
2420
+ }
2421
+ throw std::runtime_error("failed to restore kv cache");
2422
+ }
2423
+ }
2424
+
2425
+ void llama_kv_cache_recurrent::state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id) const {
2426
+ for (const auto & range : cell_ranges) {
2427
+ for (uint32_t i = range.first; i < range.second; ++i) {
2428
+ const auto & cell = cells[i];
2429
+ const llama_pos pos = cell.pos;
2430
+ const uint32_t n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0;
2431
+
2432
+ io.write(&pos, sizeof(pos));
2433
+ io.write(&n_seq_id, sizeof(n_seq_id));
2434
+
2435
+ if (n_seq_id) {
2436
+ for (auto seq_id : cell.seq_id) {
2437
+ io.write(&seq_id, sizeof(seq_id));
2438
+ }
2439
+ }
2440
+ }
2441
+ }
2442
+ }
2443
+
2444
+ void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const {
2445
+ const uint32_t v_trans = 0;
2446
+ const uint32_t n_layer = hparams.n_layer;
2447
+
2448
+ io.write(&v_trans, sizeof(v_trans));
2449
+ io.write(&n_layer, sizeof(n_layer));
2450
+
2451
+ std::vector<uint8_t> tmp_buf;
2452
+
2453
+ // Iterate and write all the keys first, each row is a cell
2454
+ // Get whole range at a time
2455
+ for (uint32_t il = 0; il < n_layer; ++il) {
2456
+ const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
2457
+
2458
+ // Write key type
2459
+ const int32_t k_type_i = (int32_t)k_l[il]->type;
2460
+ io.write(&k_type_i, sizeof(k_type_i));
2461
+
2462
+ // Write row size of key
2463
+ const uint64_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa);
2464
+ io.write(&k_size_row, sizeof(k_size_row));
2465
+
2466
+             // Write out each range of cells, one k_size_row per cell, directly from the key tensor
2467
+ for (const auto & range : cell_ranges) {
2468
+ const size_t range_size = range.second - range.first;
2469
+ const size_t buf_size = range_size * k_size_row;
2470
+ io.write_tensor(k_l[il], range.first * k_size_row, buf_size);
2471
+ }
2472
+ }
2473
+
2474
+ if (!v_trans) {
2475
+ for (uint32_t il = 0; il < n_layer; ++il) {
2476
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
2477
+
2478
+ // Write value type
2479
+ const int32_t v_type_i = (int32_t)v_l[il]->type;
2480
+ io.write(&v_type_i, sizeof(v_type_i));
2481
+
2482
+ // Write row size of value
2483
+ const uint64_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa);
2484
+ io.write(&v_size_row, sizeof(v_size_row));
2485
+
2486
+                 // Write out each range of cells, one v_size_row per cell, directly from the value tensor
2487
+ for (const auto & range : cell_ranges) {
2488
+ const size_t range_size = range.second - range.first;
2489
+ const size_t buf_size = range_size * v_size_row;
2490
+ io.write_tensor(v_l[il], range.first * v_size_row, buf_size);
2491
+ }
2492
+ }
2493
+ } else {
2494
+ // When v is transposed, we also need the element size and get the element ranges from each row
2495
+ const uint32_t kv_size = size;
2496
+ for (uint32_t il = 0; il < n_layer; ++il) {
2497
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
2498
+
2499
+ // Write value type
2500
+ const int32_t v_type_i = (int32_t)v_l[il]->type;
2501
+ io.write(&v_type_i, sizeof(v_type_i));
2502
+
2503
+ // Write element size
2504
+ const uint32_t v_size_el = ggml_type_size(v_l[il]->type);
2505
+ io.write(&v_size_el, sizeof(v_size_el));
2506
+
2507
+ // Write GQA embedding size
2508
+ io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));
2509
+
2510
+ // For each row, we get the element values of each cell
2511
+ for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
2512
+                     // Write out each range of cells, v_size_el bytes per cell, directly from the value tensor
2513
+ for (const auto & range : cell_ranges) {
2514
+ const size_t range_size = range.second - range.first;
2515
+ const size_t src_offset = (range.first + j * kv_size) * v_size_el;
2516
+ const size_t buf_size = range_size * v_size_el;
2517
+ io.write_tensor(v_l[il], src_offset, buf_size);
2518
+ }
2519
+ }
2520
+ }
2521
+ }
2522
+ }
2523
+
2524
+ bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) {
2525
+ if (dest_seq_id != -1) {
2526
+ // single sequence
2527
+
2528
+ seq_rm(dest_seq_id, -1, -1);
2529
+
2530
+ llama_sbatch sbatch;
2531
+ llama_ubatch batch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false);
2532
+
2533
+ batch.n_tokens = cell_count;
2534
+ batch.n_seq_tokens = cell_count;
2535
+ batch.n_seqs = 1;
2536
+
2537
+ for (uint32_t i = 0; i < cell_count; ++i) {
2538
+ llama_pos pos;
2539
+ uint32_t n_seq_id;
2540
+
2541
+ io.read_to(&pos, sizeof(pos));
2542
+ io.read_to(&n_seq_id, sizeof(n_seq_id));
2543
+
2544
+ if (n_seq_id != 0) {
2545
+ LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
2546
+ return false;
2547
+ }
2548
+
2549
+ batch.pos[i] = pos;
2550
+ }
2551
+ batch.n_seq_id[0] = 1;
2552
+ batch.seq_id[0] = &dest_seq_id;
2553
+ if (!find_slot(batch)) {
2554
+ LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
2555
+ return false;
2556
+ }
2557
+ commit();
2558
+
2559
+ // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
2560
+ // Assume that this is one contiguous block of cells
2561
+ GGML_ASSERT(head + cell_count <= size);
2562
+ GGML_ASSERT(cells[head].pos == batch.pos[0]);
2563
+ GGML_ASSERT(cells[head + cell_count - 1].pos == batch.pos[cell_count - 1]);
2564
+ GGML_ASSERT(cells[head].has_seq_id(dest_seq_id));
2565
+ GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id));
2566
+ } else {
2567
+ // whole KV cache restore
2568
+
2569
+ if (cell_count > size) {
2570
+ LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
2571
+ return false;
2572
+ }
2573
+
2574
+ clear();
2575
+
2576
+ for (uint32_t i = 0; i < cell_count; ++i) {
2577
+ kv_cell & cell = cells[i];
2578
+
2579
+ llama_pos pos;
2580
+ uint32_t n_seq_id;
2581
+
2582
+ io.read_to(&pos, sizeof(pos));
2583
+ io.read_to(&n_seq_id, sizeof(n_seq_id));
2584
+
2585
+ cell.pos = pos;
2586
+
2587
+ for (uint32_t j = 0; j < n_seq_id; ++j) {
2588
+ llama_seq_id seq_id;
2589
+ io.read_to(&seq_id, sizeof(seq_id));
2590
+
2591
+ // TODO: llama_kv_cache_recurrent should have a notion of max sequences
2592
+ //if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) {
2593
+ if (seq_id < 0) {
2594
+ //LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx));
2595
+ LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, inf)\n", __func__, seq_id);
2596
+ return false;
2597
+ }
2598
+
2599
+ cell.seq_id.insert(seq_id);
2600
+
2601
+ int32_t & tail = cells[seq_id].tail;
2602
+ if (tail != -1) {
2603
+ LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail);
2604
+ return false;
2605
+ }
2606
+ tail = i;
2607
+ }
2608
+ }
2609
+
2610
+ head = 0;
2611
+ used = cell_count;
2612
+ }
2613
+
2614
+ for (uint32_t i = 0; i < cell_count; ++i) {
2615
+ uint32_t cell_id = head + i;
2616
+ // make sure the recurrent states will keep their restored state
2617
+ cells[cell_id].src = cell_id;
2618
+ }
2619
+
2620
+ return true;
2621
+ }
2622
+
2623
+ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell_count) {
2624
+ uint32_t v_trans;
2625
+ uint32_t n_layer;
2626
+ io.read_to(&v_trans, sizeof(v_trans));
2627
+ io.read_to(&n_layer, sizeof(n_layer));
2628
+
2629
+ if (n_layer != hparams.n_layer) {
2630
+ LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer);
2631
+ return false;
2632
+ }
2633
+ if (cell_count > size) {
2634
+ LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size);
2635
+ return false;
2636
+ }
2637
+ if (false != (bool) v_trans) {
2638
+ LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__);
2639
+ return false;
2640
+ }
2641
+
2642
+ // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
2643
+ for (uint32_t il = 0; il < n_layer; ++il) {
2644
+ const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
2645
+
2646
+ // Read type of key
2647
+ int32_t k_type_i_ref;
2648
+ io.read_to(&k_type_i_ref, sizeof(k_type_i_ref));
2649
+ const int32_t k_type_i = (int32_t) k_l[il]->type;
2650
+ if (k_type_i != k_type_i_ref) {
2651
+ LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il);
2652
+ return false;
2653
+ }
2654
+
2655
+ // Read row size of key
2656
+ uint64_t k_size_row_ref;
2657
+ io.read_to(&k_size_row_ref, sizeof(k_size_row_ref));
2658
+ const size_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa);
2659
+ if (k_size_row != k_size_row_ref) {
2660
+ LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il);
2661
+ return false;
2662
+ }
2663
+
2664
+ if (cell_count) {
2665
+ // Read and set the keys for the whole cell range
2666
+ ggml_backend_tensor_set(k_l[il], io.read(cell_count * k_size_row), head * k_size_row, cell_count * k_size_row);
2667
+ }
2668
+ }
2669
+
2670
+ if (!v_trans) {
2671
+ for (uint32_t il = 0; il < n_layer; ++il) {
2672
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
2673
+
2674
+ // Read type of value
2675
+ int32_t v_type_i_ref;
2676
+ io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
2677
+ const int32_t v_type_i = (int32_t)v_l[il]->type;
2678
+ if (v_type_i != v_type_i_ref) {
2679
+ LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
2680
+ return false;
2681
+ }
2682
+
2683
+ // Read row size of value
2684
+ uint64_t v_size_row_ref;
2685
+ io.read_to(&v_size_row_ref, sizeof(v_size_row_ref));
2686
+ const size_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa);
2687
+ if (v_size_row != v_size_row_ref) {
2688
+ LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il);
2689
+ return false;
2690
+ }
2691
+
2692
+ if (cell_count) {
2693
+ // Read and set the values for the whole cell range
2694
+ ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_row), head * v_size_row, cell_count * v_size_row);
2695
+ }
2696
+ }
2697
+ } else {
2698
+ // For each layer, read the values for each cell (transposed)
2699
+ for (uint32_t il = 0; il < n_layer; ++il) {
2700
+ const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
2701
+
2702
+ // Read type of value
2703
+ int32_t v_type_i_ref;
2704
+ io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
2705
+ const int32_t v_type_i = (int32_t)v_l[il]->type;
2706
+ if (v_type_i != v_type_i_ref) {
2707
+ LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
2708
+ return false;
2709
+ }
2710
+
2711
+ // Read element size of value
2712
+ uint32_t v_size_el_ref;
2713
+ io.read_to(&v_size_el_ref, sizeof(v_size_el_ref));
2714
+ const size_t v_size_el = ggml_type_size(v_l[il]->type);
2715
+ if (v_size_el != v_size_el_ref) {
2716
+ LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il);
2717
+ return false;
2718
+ }
2719
+
2720
+ // Read GQA embedding size
2721
+ uint32_t n_embd_v_gqa_ref;
2722
+ io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref));
2723
+ if (n_embd_v_gqa != n_embd_v_gqa_ref) {
2724
+ LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il);
2725
+ return false;
2726
+ }
2727
+
2728
+ if (cell_count) {
2729
+ // For each row in the transposed matrix, read the values for the whole cell range
2730
+ for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
2731
+ const size_t dst_offset = (head + j * size) * v_size_el;
2732
+ ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el);
2733
+ }
2734
+ }
2735
+ }
2736
+ }
2737
+
2738
+ return true;
2739
+ }
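For orientation, the stream that state_write() produces and that state_read_meta()/state_read_data() consume has a fixed shape; the outline below is a descriptive summary of the fields in write order, not an actual struct in the code:

```cpp
// Serialized recurrent-cache state, in write order (field-by-field I/O, no struct):
//
//   uint32_t cell_count
//   cell_count times:
//       llama_pos    pos
//       uint32_t     n_seq_id                     // 0 when saving a single sequence
//       llama_seq_id seq_id[n_seq_id]
//   uint32_t v_trans                              // always 0 for this cache
//   uint32_t n_layer
//   n_layer times:
//       int32_t k_type;  uint64_t k_size_row;  bytes k_rows[cell_count * k_size_row]
//   n_layer times (since v_trans == 0):
//       int32_t v_type;  uint64_t v_size_row;  bytes v_rows[cell_count * v_size_row]
```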