whispercpp 1.3.1 → 1.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
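To verify this comparison independently, an equivalent diff can be reproduced locally with standard RubyGems tooling. The sketch below assumes only the gem name and version numbers shown on this page; gem fetch, gem unpack, and diff are stock commands:

    gem fetch whispercpp --version 1.3.1   # download whispercpp-1.3.1.gem from the registry
    gem fetch whispercpp --version 1.3.2   # download whispercpp-1.3.2.gem
    gem unpack whispercpp-1.3.1.gem        # extract the packaged files to ./whispercpp-1.3.1/
    gem unpack whispercpp-1.3.2.gem        # extract the packaged files to ./whispercpp-1.3.2/
    diff -ruN whispercpp-1.3.1 whispercpp-1.3.2   # recursive unified diff of the two trees

Note that the paths listed below follow the internal layout of the .gem archive itself (checksums.yaml, metadata, and the packaged files under data/), so they carry a data/ prefix that gem unpack does not reproduce.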
Files changed (797)
  1. checksums.yaml +4 -4
  2. data/.gitignore +4 -3
  3. data/README.md +92 -31
  4. data/Rakefile +26 -7
  5. data/ext/.gitignore +5 -7
  6. data/ext/dependencies.rb +61 -0
  7. data/ext/extconf.rb +21 -198
  8. data/ext/options.rb +221 -0
  9. data/ext/ruby_whisper.c +159 -0
  10. data/ext/ruby_whisper.h +17 -2
  11. data/ext/ruby_whisper_context.c +641 -0
  12. data/ext/ruby_whisper_error.c +52 -0
  13. data/ext/ruby_whisper_model.c +232 -0
  14. data/ext/ruby_whisper_params.c +1301 -0
  15. data/ext/ruby_whisper_segment.c +143 -0
  16. data/ext/ruby_whisper_transcribe.cpp +87 -0
  17. data/ext/ruby_whisper_vad_params.c +288 -0
  18. data/ext/sources/.dockerignore +3 -0
  19. data/ext/sources/.github/workflows/bindings-ruby.yml +21 -0
  20. data/ext/sources/CMakeGraphVizOptions.cmake +8 -0
  21. data/ext/sources/CMakeLists.txt +251 -0
  22. data/ext/sources/bindings/javascript/CMakeLists.txt +41 -0
  23. data/ext/sources/bindings/javascript/emscripten.cpp +93 -0
  24. data/ext/sources/bindings/javascript/libwhisper.worker.js +1 -0
  25. data/ext/sources/bindings/javascript/package-tmpl.json +26 -0
  26. data/ext/sources/bindings/javascript/package.json +26 -0
  27. data/ext/sources/bindings/javascript/whisper.js +19 -0
  28. data/ext/sources/build-xcframework.sh +547 -0
  29. data/ext/sources/ci/run.sh +336 -0
  30. data/ext/sources/close-issue.yml +28 -0
  31. data/ext/sources/cmake/DefaultTargetOptions.cmake +16 -0
  32. data/ext/sources/cmake/FindFFmpeg.cmake +163 -0
  33. data/ext/sources/cmake/build-info.cmake +60 -0
  34. data/ext/sources/cmake/git-vars.cmake +22 -0
  35. data/ext/sources/cmake/whisper-config.cmake.in +65 -0
  36. data/ext/sources/cmake/whisper.pc.in +10 -0
  37. data/ext/sources/examples/CMakeLists.txt +124 -0
  38. data/ext/sources/examples/addon.node/CMakeLists.txt +31 -0
  39. data/ext/sources/examples/addon.node/__test__/whisper.spec.js +37 -0
  40. data/ext/sources/examples/addon.node/addon.cpp +438 -0
  41. data/ext/sources/examples/addon.node/index.js +54 -0
  42. data/ext/sources/examples/addon.node/package.json +16 -0
  43. data/ext/sources/examples/bench/CMakeLists.txt +8 -0
  44. data/ext/sources/examples/bench/bench.cpp +175 -0
  45. data/ext/sources/examples/bench.wasm/CMakeLists.txt +49 -0
  46. data/ext/sources/examples/bench.wasm/emscripten.cpp +87 -0
  47. data/ext/sources/examples/bench.wasm/index-tmpl.html +284 -0
  48. data/ext/sources/examples/cli/CMakeLists.txt +8 -0
  49. data/ext/sources/examples/cli/cli.cpp +1294 -0
  50. data/ext/sources/examples/coi-serviceworker.js +146 -0
  51. data/ext/sources/examples/command/CMakeLists.txt +10 -0
  52. data/ext/sources/examples/command/command.cpp +776 -0
  53. data/ext/sources/examples/command/commands.txt +9 -0
  54. data/ext/sources/examples/command.wasm/CMakeLists.txt +50 -0
  55. data/ext/sources/examples/command.wasm/emscripten.cpp +327 -0
  56. data/ext/sources/examples/command.wasm/index-tmpl.html +414 -0
  57. data/ext/sources/examples/common-ggml.cpp +238 -0
  58. data/ext/sources/examples/common-ggml.h +18 -0
  59. data/ext/sources/examples/common-sdl.cpp +227 -0
  60. data/ext/sources/examples/common-sdl.h +49 -0
  61. data/ext/sources/examples/common-whisper.cpp +168 -0
  62. data/ext/sources/examples/common-whisper.h +24 -0
  63. data/ext/sources/examples/common.cpp +675 -0
  64. data/ext/sources/examples/common.h +322 -0
  65. data/ext/sources/examples/deprecation-warning/CMakeLists.txt +6 -0
  66. data/ext/sources/examples/deprecation-warning/deprecation-warning.cpp +38 -0
  67. data/ext/sources/examples/ffmpeg-transcode.cpp +368 -0
  68. data/ext/sources/examples/generate-karaoke.sh +57 -0
  69. data/ext/sources/examples/grammar-parser.cpp +423 -0
  70. data/ext/sources/examples/grammar-parser.h +29 -0
  71. data/ext/sources/examples/helpers.js +191 -0
  72. data/ext/sources/examples/json.hpp +24596 -0
  73. data/ext/sources/examples/livestream.sh +112 -0
  74. data/ext/sources/examples/lsp/CMakeLists.txt +9 -0
  75. data/ext/sources/examples/lsp/lsp.cpp +467 -0
  76. data/ext/sources/examples/lsp/whisper.vim +362 -0
  77. data/ext/sources/examples/miniaudio.h +93468 -0
  78. data/ext/sources/examples/python/test_whisper_processor.py +7 -0
  79. data/ext/sources/examples/python/whisper_processor.py +54 -0
  80. data/ext/sources/examples/quantize/CMakeLists.txt +6 -0
  81. data/ext/sources/examples/quantize/quantize.cpp +223 -0
  82. data/ext/sources/examples/server/CMakeLists.txt +12 -0
  83. data/ext/sources/examples/server/bench.js +29 -0
  84. data/ext/sources/examples/server/httplib.h +10497 -0
  85. data/ext/sources/examples/server/server.cpp +1091 -0
  86. data/ext/sources/examples/server.py +115 -0
  87. data/ext/sources/examples/stb_vorbis.c +5584 -0
  88. data/ext/sources/examples/stream/CMakeLists.txt +10 -0
  89. data/ext/sources/examples/stream/stream.cpp +429 -0
  90. data/ext/sources/examples/stream.wasm/CMakeLists.txt +49 -0
  91. data/ext/sources/examples/stream.wasm/emscripten.cpp +216 -0
  92. data/ext/sources/examples/stream.wasm/index-tmpl.html +414 -0
  93. data/ext/sources/examples/sycl/CMakeLists.txt +9 -0
  94. data/ext/sources/examples/sycl/build.sh +22 -0
  95. data/ext/sources/examples/sycl/ls-sycl-device.cpp +11 -0
  96. data/ext/sources/examples/sycl/run-whisper.sh +17 -0
  97. data/ext/sources/examples/talk-llama/CMakeLists.txt +40 -0
  98. data/ext/sources/examples/talk-llama/eleven-labs.py +80 -0
  99. data/ext/sources/examples/talk-llama/llama-adapter.cpp +388 -0
  100. data/ext/sources/examples/talk-llama/llama-adapter.h +76 -0
  101. data/ext/sources/examples/talk-llama/llama-arch.cpp +1746 -0
  102. data/ext/sources/examples/talk-llama/llama-arch.h +437 -0
  103. data/ext/sources/examples/talk-llama/llama-batch.cpp +374 -0
  104. data/ext/sources/examples/talk-llama/llama-batch.h +89 -0
  105. data/ext/sources/examples/talk-llama/llama-chat.cpp +663 -0
  106. data/ext/sources/examples/talk-llama/llama-chat.h +58 -0
  107. data/ext/sources/examples/talk-llama/llama-context.cpp +2676 -0
  108. data/ext/sources/examples/talk-llama/llama-context.h +276 -0
  109. data/ext/sources/examples/talk-llama/llama-cparams.cpp +5 -0
  110. data/ext/sources/examples/talk-llama/llama-cparams.h +41 -0
  111. data/ext/sources/examples/talk-llama/llama-grammar.cpp +1229 -0
  112. data/ext/sources/examples/talk-llama/llama-grammar.h +173 -0
  113. data/ext/sources/examples/talk-llama/llama-graph.cpp +1618 -0
  114. data/ext/sources/examples/talk-llama/llama-graph.h +640 -0
  115. data/ext/sources/examples/talk-llama/llama-hparams.cpp +95 -0
  116. data/ext/sources/examples/talk-llama/llama-hparams.h +190 -0
  117. data/ext/sources/examples/talk-llama/llama-impl.cpp +167 -0
  118. data/ext/sources/examples/talk-llama/llama-impl.h +61 -0
  119. data/ext/sources/examples/talk-llama/llama-io.cpp +15 -0
  120. data/ext/sources/examples/talk-llama/llama-io.h +35 -0
  121. data/ext/sources/examples/talk-llama/llama-kv-cache.cpp +2739 -0
  122. data/ext/sources/examples/talk-llama/llama-kv-cache.h +502 -0
  123. data/ext/sources/examples/talk-llama/llama-kv-cells.h +379 -0
  124. data/ext/sources/examples/talk-llama/llama-memory.cpp +1 -0
  125. data/ext/sources/examples/talk-llama/llama-memory.h +32 -0
  126. data/ext/sources/examples/talk-llama/llama-mmap.cpp +600 -0
  127. data/ext/sources/examples/talk-llama/llama-mmap.h +68 -0
  128. data/ext/sources/examples/talk-llama/llama-model-loader.cpp +1138 -0
  129. data/ext/sources/examples/talk-llama/llama-model-loader.h +169 -0
  130. data/ext/sources/examples/talk-llama/llama-model-saver.cpp +281 -0
  131. data/ext/sources/examples/talk-llama/llama-model-saver.h +37 -0
  132. data/ext/sources/examples/talk-llama/llama-model.cpp +13814 -0
  133. data/ext/sources/examples/talk-llama/llama-model.h +425 -0
  134. data/ext/sources/examples/talk-llama/llama-quant.cpp +966 -0
  135. data/ext/sources/examples/talk-llama/llama-quant.h +1 -0
  136. data/ext/sources/examples/talk-llama/llama-sampling.cpp +2575 -0
  137. data/ext/sources/examples/talk-llama/llama-sampling.h +32 -0
  138. data/ext/sources/examples/talk-llama/llama-vocab.cpp +3340 -0
  139. data/ext/sources/examples/talk-llama/llama-vocab.h +131 -0
  140. data/ext/sources/examples/talk-llama/llama.cpp +354 -0
  141. data/ext/sources/examples/talk-llama/llama.h +1377 -0
  142. data/ext/sources/examples/talk-llama/prompts/talk-alpaca.txt +23 -0
  143. data/ext/sources/examples/talk-llama/speak +40 -0
  144. data/ext/sources/examples/talk-llama/speak.bat +1 -0
  145. data/ext/sources/examples/talk-llama/speak.ps1 +14 -0
  146. data/ext/sources/examples/talk-llama/talk-llama.cpp +808 -0
  147. data/ext/sources/examples/talk-llama/unicode-data.cpp +7034 -0
  148. data/ext/sources/examples/talk-llama/unicode-data.h +20 -0
  149. data/ext/sources/examples/talk-llama/unicode.cpp +849 -0
  150. data/ext/sources/examples/talk-llama/unicode.h +66 -0
  151. data/ext/sources/examples/vad-speech-segments/CMakeLists.txt +8 -0
  152. data/ext/sources/examples/vad-speech-segments/speech.cpp +143 -0
  153. data/ext/sources/examples/wchess/CMakeLists.txt +10 -0
  154. data/ext/sources/examples/wchess/libwchess/CMakeLists.txt +19 -0
  155. data/ext/sources/examples/wchess/libwchess/Chessboard.cpp +803 -0
  156. data/ext/sources/examples/wchess/libwchess/Chessboard.h +33 -0
  157. data/ext/sources/examples/wchess/libwchess/WChess.cpp +193 -0
  158. data/ext/sources/examples/wchess/libwchess/WChess.h +63 -0
  159. data/ext/sources/examples/wchess/libwchess/test-chessboard.cpp +117 -0
  160. data/ext/sources/examples/wchess/wchess.cmd/CMakeLists.txt +8 -0
  161. data/ext/sources/examples/wchess/wchess.cmd/wchess.cmd.cpp +249 -0
  162. data/ext/sources/examples/whisper.wasm/CMakeLists.txt +50 -0
  163. data/ext/sources/examples/whisper.wasm/emscripten.cpp +118 -0
  164. data/ext/sources/examples/whisper.wasm/index-tmpl.html +658 -0
  165. data/ext/sources/ggml/CMakeLists.txt +390 -0
  166. data/ext/sources/ggml/cmake/BuildTypes.cmake +54 -0
  167. data/ext/sources/ggml/cmake/GitVars.cmake +22 -0
  168. data/ext/sources/ggml/cmake/common.cmake +26 -0
  169. data/ext/sources/ggml/cmake/ggml-config.cmake.in +152 -0
  170. data/ext/{ggml → sources/ggml}/include/ggml-alloc.h +1 -1
  171. data/ext/{ggml → sources/ggml}/include/ggml-backend.h +9 -7
  172. data/ext/{ggml → sources/ggml}/include/ggml-cpp.h +2 -1
  173. data/ext/{ggml → sources/ggml}/include/ggml-cpu.h +9 -1
  174. data/ext/{ggml → sources/ggml}/include/ggml-metal.h +1 -1
  175. data/ext/{ggml → sources/ggml}/include/ggml-opt.h +49 -28
  176. data/ext/{ggml → sources/ggml}/include/ggml-rpc.h +6 -1
  177. data/ext/{ggml → sources/ggml}/include/ggml-vulkan.h +0 -2
  178. data/ext/{ggml → sources/ggml}/include/ggml.h +182 -265
  179. data/ext/sources/ggml/include/gguf.h +202 -0
  180. data/ext/sources/ggml/src/CMakeLists.txt +346 -0
  181. data/ext/{ggml → sources/ggml}/src/ggml-alloc.c +34 -29
  182. data/ext/sources/ggml/src/ggml-amx/CMakeLists.txt +107 -0
  183. data/ext/{ggml → sources/ggml}/src/ggml-backend-impl.h +1 -2
  184. data/ext/{ggml → sources/ggml}/src/ggml-backend-reg.cpp +87 -53
  185. data/ext/{ggml → sources/ggml}/src/ggml-backend.cpp +26 -14
  186. data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +87 -0
  187. data/ext/sources/ggml/src/ggml-cann/CMakeLists.txt +74 -0
  188. data/ext/sources/ggml/src/ggml-cann/Doxyfile +2579 -0
  189. data/ext/{ggml → sources/ggml}/src/ggml-cann/acl_tensor.cpp +10 -4
  190. data/ext/{ggml → sources/ggml}/src/ggml-cann/acl_tensor.h +5 -5
  191. data/ext/{ggml → sources/ggml}/src/ggml-cann/aclnn_ops.cpp +1272 -1506
  192. data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +1125 -0
  193. data/ext/{ggml → sources/ggml}/src/ggml-cann/common.h +135 -1
  194. data/ext/{ggml → sources/ggml}/src/ggml-cann/ggml-cann.cpp +564 -146
  195. data/ext/sources/ggml/src/ggml-cann/kernels/CMakeLists.txt +30 -0
  196. data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/dup.cpp +3 -5
  197. data/ext/{ggml → sources/ggml}/src/ggml-common.h +12 -8
  198. data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +504 -0
  199. data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/amx.cpp +2 -1
  200. data/ext/sources/ggml/src/ggml-cpu/binary-ops.cpp +158 -0
  201. data/ext/sources/ggml/src/ggml-cpu/binary-ops.h +16 -0
  202. data/ext/sources/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +100 -0
  203. data/ext/sources/ggml/src/ggml-cpu/common.h +72 -0
  204. data/ext/{ggml → sources/ggml}/src/ggml-cpu/cpu-feats-x86.cpp +5 -1
  205. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +6431 -0
  206. data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-impl.h +163 -41
  207. data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-quants.c +4029 -1117
  208. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +3510 -0
  209. data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu.cpp +67 -18
  210. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +337 -0
  211. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.h +95 -0
  212. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +482 -0
  213. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.h +17 -0
  214. data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +3544 -0
  215. data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.h +14 -0
  216. data/ext/sources/ggml/src/ggml-cpu/ops.cpp +8903 -0
  217. data/ext/sources/ggml/src/ggml-cpu/ops.h +110 -0
  218. data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +892 -0
  219. data/ext/sources/ggml/src/ggml-cpu/unary-ops.cpp +186 -0
  220. data/ext/sources/ggml/src/ggml-cpu/unary-ops.h +28 -0
  221. data/ext/sources/ggml/src/ggml-cpu/vec.cpp +252 -0
  222. data/ext/sources/ggml/src/ggml-cpu/vec.h +818 -0
  223. data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +184 -0
  224. data/ext/sources/ggml/src/ggml-cuda/acc.cu +61 -0
  225. data/ext/sources/ggml/src/ggml-cuda/acc.cuh +5 -0
  226. data/ext/sources/ggml/src/ggml-cuda/arange.cu +34 -0
  227. data/ext/sources/ggml/src/ggml-cuda/arange.cuh +5 -0
  228. data/ext/sources/ggml/src/ggml-cuda/argmax.cu +91 -0
  229. data/ext/sources/ggml/src/ggml-cuda/argmax.cuh +3 -0
  230. data/ext/sources/ggml/src/ggml-cuda/argsort.cu +104 -0
  231. data/ext/sources/ggml/src/ggml-cuda/argsort.cuh +3 -0
  232. data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +363 -0
  233. data/ext/sources/ggml/src/ggml-cuda/binbcast.cuh +9 -0
  234. data/ext/sources/ggml/src/ggml-cuda/clamp.cu +45 -0
  235. data/ext/sources/ggml/src/ggml-cuda/clamp.cuh +5 -0
  236. data/ext/sources/ggml/src/ggml-cuda/common.cuh +828 -0
  237. data/ext/sources/ggml/src/ggml-cuda/concat.cu +221 -0
  238. data/ext/sources/ggml/src/ggml-cuda/concat.cuh +5 -0
  239. data/ext/sources/ggml/src/ggml-cuda/conv-transpose-1d.cu +89 -0
  240. data/ext/sources/ggml/src/ggml-cuda/conv-transpose-1d.cuh +5 -0
  241. data/ext/sources/ggml/src/ggml-cuda/convert.cu +730 -0
  242. data/ext/sources/ggml/src/ggml-cuda/convert.cuh +26 -0
  243. data/ext/sources/ggml/src/ggml-cuda/count-equal.cu +64 -0
  244. data/ext/sources/ggml/src/ggml-cuda/count-equal.cuh +5 -0
  245. data/ext/sources/ggml/src/ggml-cuda/cp-async.cuh +57 -0
  246. data/ext/sources/ggml/src/ggml-cuda/cpy.cu +705 -0
  247. data/ext/sources/ggml/src/ggml-cuda/cpy.cuh +11 -0
  248. data/ext/sources/ggml/src/ggml-cuda/cross-entropy-loss.cu +189 -0
  249. data/ext/sources/ggml/src/ggml-cuda/cross-entropy-loss.cuh +7 -0
  250. data/ext/sources/ggml/src/ggml-cuda/dequantize.cuh +103 -0
  251. data/ext/sources/ggml/src/ggml-cuda/diagmask.cu +40 -0
  252. data/ext/sources/ggml/src/ggml-cuda/diagmask.cuh +5 -0
  253. data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +881 -0
  254. data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +1471 -0
  255. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cu +357 -0
  256. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cuh +3 -0
  257. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cu +365 -0
  258. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cuh +3 -0
  259. data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f16.cuh +482 -0
  260. data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f32.cuh +472 -0
  261. data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +634 -0
  262. data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +3 -0
  263. data/ext/sources/ggml/src/ggml-cuda/fattn.cu +346 -0
  264. data/ext/sources/ggml/src/ggml-cuda/fattn.cuh +3 -0
  265. data/ext/sources/ggml/src/ggml-cuda/getrows.cu +275 -0
  266. data/ext/sources/ggml/src/ggml-cuda/getrows.cuh +15 -0
  267. data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +3505 -0
  268. data/ext/sources/ggml/src/ggml-cuda/gla.cu +93 -0
  269. data/ext/sources/ggml/src/ggml-cuda/gla.cuh +3 -0
  270. data/ext/sources/ggml/src/ggml-cuda/im2col.cu +103 -0
  271. data/ext/sources/ggml/src/ggml-cuda/im2col.cuh +5 -0
  272. data/ext/sources/ggml/src/ggml-cuda/mma.cuh +396 -0
  273. data/ext/sources/ggml/src/ggml-cuda/mmq.cu +324 -0
  274. data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +3217 -0
  275. data/ext/sources/ggml/src/ggml-cuda/mmv.cu +336 -0
  276. data/ext/sources/ggml/src/ggml-cuda/mmv.cuh +12 -0
  277. data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +595 -0
  278. data/ext/sources/ggml/src/ggml-cuda/mmvq.cuh +12 -0
  279. data/ext/sources/ggml/src/ggml-cuda/norm.cu +458 -0
  280. data/ext/sources/ggml/src/ggml-cuda/norm.cuh +11 -0
  281. data/ext/sources/ggml/src/ggml-cuda/opt-step-adamw.cu +78 -0
  282. data/ext/sources/ggml/src/ggml-cuda/opt-step-adamw.cuh +5 -0
  283. data/ext/sources/ggml/src/ggml-cuda/out-prod.cu +68 -0
  284. data/ext/sources/ggml/src/ggml-cuda/out-prod.cuh +3 -0
  285. data/ext/sources/ggml/src/ggml-cuda/pad.cu +49 -0
  286. data/ext/sources/ggml/src/ggml-cuda/pad.cuh +5 -0
  287. data/ext/sources/ggml/src/ggml-cuda/pool2d.cu +94 -0
  288. data/ext/sources/ggml/src/ggml-cuda/pool2d.cuh +5 -0
  289. data/ext/sources/ggml/src/ggml-cuda/quantize.cu +190 -0
  290. data/ext/sources/ggml/src/ggml-cuda/quantize.cuh +27 -0
  291. data/ext/sources/ggml/src/ggml-cuda/rope.cu +456 -0
  292. data/ext/sources/ggml/src/ggml-cuda/rope.cuh +7 -0
  293. data/ext/sources/ggml/src/ggml-cuda/scale.cu +31 -0
  294. data/ext/sources/ggml/src/ggml-cuda/scale.cuh +5 -0
  295. data/ext/sources/ggml/src/ggml-cuda/softmax.cu +283 -0
  296. data/ext/sources/ggml/src/ggml-cuda/softmax.cuh +7 -0
  297. data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +148 -0
  298. data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cuh +3 -0
  299. data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cu +153 -0
  300. data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cuh +3 -0
  301. data/ext/sources/ggml/src/ggml-cuda/sum.cu +45 -0
  302. data/ext/sources/ggml/src/ggml-cuda/sum.cuh +5 -0
  303. data/ext/sources/ggml/src/ggml-cuda/sumrows.cu +39 -0
  304. data/ext/sources/ggml/src/ggml-cuda/sumrows.cuh +5 -0
  305. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_16.cu +5 -0
  306. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_8.cu +10 -0
  307. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_1.cu +10 -0
  308. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_2.cu +10 -0
  309. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_4.cu +10 -0
  310. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_16.cu +5 -0
  311. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_4.cu +10 -0
  312. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_8.cu +10 -0
  313. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_1.cu +10 -0
  314. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_2.cu +10 -0
  315. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_16.cu +5 -0
  316. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_2.cu +10 -0
  317. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_4.cu +10 -0
  318. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_8.cu +10 -0
  319. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_64-ncols2_1.cu +10 -0
  320. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_1.cu +10 -0
  321. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_2.cu +10 -0
  322. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_4.cu +10 -0
  323. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_8.cu +10 -0
  324. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu +5 -0
  325. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu +5 -0
  326. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu +5 -0
  327. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu +5 -0
  328. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu +5 -0
  329. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu +5 -0
  330. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu +5 -0
  331. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu +5 -0
  332. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu +5 -0
  333. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu +5 -0
  334. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu +5 -0
  335. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu +5 -0
  336. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu +5 -0
  337. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu +5 -0
  338. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu +5 -0
  339. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu +5 -0
  340. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu +5 -0
  341. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu +5 -0
  342. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu +5 -0
  343. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu +5 -0
  344. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu +5 -0
  345. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu +5 -0
  346. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu +5 -0
  347. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu +5 -0
  348. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu +5 -0
  349. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu +5 -0
  350. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu +5 -0
  351. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu +5 -0
  352. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu +5 -0
  353. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu +5 -0
  354. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu +5 -0
  355. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu +5 -0
  356. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu +5 -0
  357. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu +5 -0
  358. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu +5 -0
  359. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu +5 -0
  360. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu +5 -0
  361. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu +5 -0
  362. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu +5 -0
  363. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu +5 -0
  364. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu +5 -0
  365. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu +5 -0
  366. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu +5 -0
  367. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu +5 -0
  368. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu +5 -0
  369. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu +5 -0
  370. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu +5 -0
  371. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu +5 -0
  372. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu +5 -0
  373. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu +5 -0
  374. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu +5 -0
  375. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu +5 -0
  376. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu +5 -0
  377. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu +5 -0
  378. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu +5 -0
  379. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu +5 -0
  380. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu +5 -0
  381. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu +5 -0
  382. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu +5 -0
  383. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu +5 -0
  384. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu +5 -0
  385. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu +5 -0
  386. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu +5 -0
  387. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu +5 -0
  388. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu +5 -0
  389. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu +5 -0
  390. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu +5 -0
  391. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu +5 -0
  392. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu +5 -0
  393. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu +5 -0
  394. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu +5 -0
  395. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu +5 -0
  396. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu +5 -0
  397. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu +5 -0
  398. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu +5 -0
  399. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu +5 -0
  400. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu +5 -0
  401. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu +5 -0
  402. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu +5 -0
  403. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu +5 -0
  404. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu +5 -0
  405. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu +5 -0
  406. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu +5 -0
  407. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu +5 -0
  408. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu +5 -0
  409. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu +5 -0
  410. data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +78 -0
  411. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_s.cu +5 -0
  412. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_s.cu +5 -0
  413. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu +5 -0
  414. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu +5 -0
  415. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_s.cu +5 -0
  416. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu +5 -0
  417. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu +5 -0
  418. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu +5 -0
  419. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q2_k.cu +5 -0
  420. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q3_k.cu +5 -0
  421. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_0.cu +5 -0
  422. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_1.cu +5 -0
  423. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_k.cu +5 -0
  424. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_0.cu +5 -0
  425. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_1.cu +5 -0
  426. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_k.cu +5 -0
  427. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q6_k.cu +5 -0
  428. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q8_0.cu +5 -0
  429. data/ext/sources/ggml/src/ggml-cuda/tsembd.cu +47 -0
  430. data/ext/sources/ggml/src/ggml-cuda/tsembd.cuh +5 -0
  431. data/ext/sources/ggml/src/ggml-cuda/unary.cu +289 -0
  432. data/ext/sources/ggml/src/ggml-cuda/unary.cuh +59 -0
  433. data/ext/sources/ggml/src/ggml-cuda/upscale.cu +51 -0
  434. data/ext/sources/ggml/src/ggml-cuda/upscale.cuh +5 -0
  435. data/ext/sources/ggml/src/ggml-cuda/vecdotq.cuh +1135 -0
  436. data/ext/{ggml → sources/ggml}/src/ggml-cuda/vendors/cuda.h +1 -0
  437. data/ext/{ggml → sources/ggml}/src/ggml-cuda/vendors/hip.h +57 -0
  438. data/ext/{ggml → sources/ggml}/src/ggml-cuda/vendors/musa.h +7 -1
  439. data/ext/sources/ggml/src/ggml-cuda/wkv.cu +199 -0
  440. data/ext/sources/ggml/src/ggml-cuda/wkv.cuh +7 -0
  441. data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +131 -0
  442. data/ext/{ggml → sources/ggml}/src/ggml-impl.h +64 -19
  443. data/ext/sources/ggml/src/ggml-kompute/CMakeLists.txt +166 -0
  444. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/common.comp +112 -0
  445. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_add.comp +58 -0
  446. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_addrow.comp +25 -0
  447. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f16.comp +52 -0
  448. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f32.comp +52 -0
  449. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f16.comp +52 -0
  450. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f32.comp +52 -0
  451. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_diagmask.comp +30 -0
  452. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_gelu.comp +22 -0
  453. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows.comp +17 -0
  454. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f16.comp +31 -0
  455. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f32.comp +31 -0
  456. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_0.comp +38 -0
  457. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_1.comp +39 -0
  458. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q6_k.comp +44 -0
  459. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul.comp +52 -0
  460. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp +69 -0
  461. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_mat_f32.comp +51 -0
  462. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_0.comp +33 -0
  463. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_1.comp +35 -0
  464. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp +140 -0
  465. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp +106 -0
  466. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q8_0.comp +73 -0
  467. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp +52 -0
  468. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp +28 -0
  469. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_norm.comp +84 -0
  470. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_relu.comp +21 -0
  471. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rmsnorm.comp +53 -0
  472. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f16.comp +52 -0
  473. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f32.comp +52 -0
  474. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f16.comp +52 -0
  475. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f32.comp +52 -0
  476. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale.comp +19 -0
  477. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale_8.comp +23 -0
  478. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_silu.comp +22 -0
  479. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp +72 -0
  480. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp +71 -0
  481. data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +120 -0
  482. data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +622 -0
  483. data/ext/{ggml → sources/ggml}/src/ggml-metal/ggml-metal.m +2178 -1064
  484. data/ext/{ggml → sources/ggml}/src/ggml-metal/ggml-metal.metal +1575 -1218
  485. data/ext/sources/ggml/src/ggml-musa/CMakeLists.txt +113 -0
  486. data/ext/sources/ggml/src/ggml-musa/mudnn.cu +112 -0
  487. data/ext/sources/ggml/src/ggml-musa/mudnn.cuh +12 -0
  488. data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +96 -0
  489. data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +5124 -0
  490. data/ext/sources/ggml/src/ggml-opencl/kernels/add.cl +83 -0
  491. data/ext/sources/ggml/src/ggml-opencl/kernels/clamp.cl +20 -0
  492. data/ext/sources/ggml/src/ggml-opencl/kernels/cpy.cl +184 -0
  493. data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +118 -0
  494. data/ext/sources/ggml/src/ggml-opencl/kernels/diag_mask_inf.cl +58 -0
  495. data/ext/sources/ggml/src/ggml-opencl/kernels/embed_kernel.py +26 -0
  496. data/ext/sources/ggml/src/ggml-opencl/kernels/gelu.cl +62 -0
  497. data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle.cl +268 -0
  498. data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_general.cl +274 -0
  499. data/ext/sources/ggml/src/ggml-opencl/kernels/get_rows.cl +163 -0
  500. data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f16.cl +57 -0
  501. data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f32.cl +57 -0
  502. data/ext/sources/ggml/src/ggml-opencl/kernels/mul.cl +79 -0
  503. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mat_Ab_Bi_8x4.cl +139 -0
  504. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f16.cl +118 -0
  505. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32.cl +118 -0
  506. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_1row.cl +94 -0
  507. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_l4.cl +84 -0
  508. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f32_f32.cl +118 -0
  509. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32.cl +192 -0
  510. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_16x_flat.cl +307 -0
  511. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_8x_flat.cl +265 -0
  512. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_8x_flat.cl +272 -0
  513. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_v.cl +254 -0
  514. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q6_k.cl +190 -0
  515. data/ext/sources/ggml/src/ggml-opencl/kernels/norm.cl +81 -0
  516. data/ext/sources/ggml/src/ggml-opencl/kernels/relu.cl +16 -0
  517. data/ext/sources/ggml/src/ggml-opencl/kernels/rms_norm.cl +96 -0
  518. data/ext/sources/ggml/src/ggml-opencl/kernels/rope.cl +721 -0
  519. data/ext/sources/ggml/src/ggml-opencl/kernels/scale.cl +16 -0
  520. data/ext/sources/ggml/src/ggml-opencl/kernels/silu.cl +30 -0
  521. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f16.cl +87 -0
  522. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f32.cl +87 -0
  523. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f16.cl +86 -0
  524. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f32.cl +86 -0
  525. data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +84 -0
  526. data/ext/{ggml → sources/ggml}/src/ggml-opt.cpp +373 -190
  527. data/ext/{ggml → sources/ggml}/src/ggml-quants.c +114 -120
  528. data/ext/sources/ggml/src/ggml-rpc/CMakeLists.txt +9 -0
  529. data/ext/{ggml → sources/ggml}/src/ggml-rpc/ggml-rpc.cpp +480 -73
  530. data/ext/sources/ggml/src/ggml-sycl/CMakeLists.txt +189 -0
  531. data/ext/sources/ggml/src/ggml-sycl/backend.hpp +37 -0
  532. data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +345 -0
  533. data/ext/sources/ggml/src/ggml-sycl/binbcast.hpp +39 -0
  534. data/ext/{ggml → sources/ggml}/src/ggml-sycl/common.cpp +20 -32
  535. data/ext/sources/ggml/src/ggml-sycl/common.hpp +589 -0
  536. data/ext/{ggml → sources/ggml}/src/ggml-sycl/concat.cpp +32 -33
  537. data/ext/sources/ggml/src/ggml-sycl/concat.hpp +20 -0
  538. data/ext/{ggml → sources/ggml}/src/ggml-sycl/conv.cpp +4 -2
  539. data/ext/sources/ggml/src/ggml-sycl/conv.hpp +20 -0
  540. data/ext/{ggml → sources/ggml}/src/ggml-sycl/convert.cpp +104 -28
  541. data/ext/sources/ggml/src/ggml-sycl/convert.hpp +34 -0
  542. data/ext/sources/ggml/src/ggml-sycl/cpy.cpp +700 -0
  543. data/ext/sources/ggml/src/ggml-sycl/cpy.hpp +11 -0
  544. data/ext/sources/ggml/src/ggml-sycl/dequantize.hpp +791 -0
  545. data/ext/{ggml → sources/ggml}/src/ggml-sycl/dmmv.cpp +156 -17
  546. data/ext/sources/ggml/src/ggml-sycl/dmmv.hpp +27 -0
  547. data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +2957 -0
  548. data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +1511 -0
  549. data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +75 -0
  550. data/ext/sources/ggml/src/ggml-sycl/gemm.hpp +99 -0
  551. data/ext/sources/ggml/src/ggml-sycl/getrows.cpp +309 -0
  552. data/ext/sources/ggml/src/ggml-sycl/getrows.hpp +20 -0
  553. data/ext/{ggml → sources/ggml}/src/ggml-sycl/ggml-sycl.cpp +1004 -1240
  554. data/ext/sources/ggml/src/ggml-sycl/gla.cpp +106 -0
  555. data/ext/sources/ggml/src/ggml-sycl/gla.hpp +8 -0
  556. data/ext/sources/ggml/src/ggml-sycl/im2col.cpp +136 -0
  557. data/ext/sources/ggml/src/ggml-sycl/im2col.hpp +21 -0
  558. data/ext/{ggml → sources/ggml}/src/ggml-sycl/mmq.cpp +0 -1
  559. data/ext/sources/ggml/src/ggml-sycl/mmq.hpp +33 -0
  560. data/ext/{ggml → sources/ggml}/src/ggml-sycl/mmvq.cpp +261 -166
  561. data/ext/sources/ggml/src/ggml-sycl/mmvq.hpp +27 -0
  562. data/ext/{ggml → sources/ggml}/src/ggml-sycl/norm.cpp +204 -81
  563. data/ext/sources/ggml/src/ggml-sycl/norm.hpp +26 -0
  564. data/ext/{ggml → sources/ggml}/src/ggml-sycl/outprod.cpp +8 -17
  565. data/ext/sources/ggml/src/ggml-sycl/outprod.hpp +10 -0
  566. data/ext/sources/ggml/src/ggml-sycl/presets.hpp +74 -0
  567. data/ext/sources/ggml/src/ggml-sycl/quants.hpp +83 -0
  568. data/ext/sources/ggml/src/ggml-sycl/rope.cpp +361 -0
  569. data/ext/sources/ggml/src/ggml-sycl/rope.hpp +20 -0
  570. data/ext/{ggml → sources/ggml}/src/ggml-sycl/softmax.cpp +35 -25
  571. data/ext/sources/ggml/src/ggml-sycl/softmax.hpp +20 -0
  572. data/ext/sources/ggml/src/ggml-sycl/sycl_hw.cpp +13 -0
  573. data/ext/sources/ggml/src/ggml-sycl/sycl_hw.hpp +23 -0
  574. data/ext/{ggml → sources/ggml}/src/ggml-sycl/tsembd.cpp +3 -3
  575. data/ext/sources/ggml/src/ggml-sycl/tsembd.hpp +20 -0
  576. data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +1215 -0
  577. data/ext/sources/ggml/src/ggml-sycl/wkv.cpp +293 -0
  578. data/ext/sources/ggml/src/ggml-sycl/wkv.hpp +10 -0
  579. data/ext/sources/ggml/src/ggml-vulkan/CMakeLists.txt +196 -0
  580. data/ext/sources/ggml/src/ggml-vulkan/cmake/host-toolchain.cmake.in +15 -0
  581. data/ext/{ggml → sources/ggml}/src/ggml-vulkan/ggml-vulkan.cpp +3130 -1087
  582. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +39 -0
  583. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +29 -0
  584. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add.comp +29 -0
  585. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argmax.comp +51 -0
  586. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp +69 -0
  587. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp +17 -0
  588. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp +41 -0
  589. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp +49 -0
  590. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp +105 -0
  591. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp +23 -0
  592. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +51 -0
  593. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +242 -0
  594. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp +17 -0
  595. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/count_equal.comp +31 -0
  596. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_f32.comp +20 -0
  597. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp +462 -0
  598. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +699 -0
  599. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_head.comp +13 -0
  600. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_m.comp +42 -0
  601. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_s.comp +35 -0
  602. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp +44 -0
  603. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xs.comp +43 -0
  604. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp +48 -0
  605. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp +39 -0
  606. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp +49 -0
  607. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp +32 -0
  608. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_xs.comp +34 -0
  609. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp +34 -0
  610. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp +42 -0
  611. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_0.comp +30 -0
  612. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_1.comp +32 -0
  613. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +68 -0
  614. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_0.comp +34 -0
  615. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_1.comp +35 -0
  616. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +70 -0
  617. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp +33 -0
  618. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q8_0.comp +31 -0
  619. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/diag_mask_inf.comp +34 -0
  620. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/div.comp +27 -0
  621. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +337 -0
  622. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.comp +162 -0
  623. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +360 -0
  624. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +267 -0
  625. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +59 -0
  626. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu.comp +25 -0
  627. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu_quick.comp +23 -0
  628. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp +64 -0
  629. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_head.comp +9 -0
  630. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp +76 -0
  631. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp +33 -0
  632. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +41 -0
  633. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp +66 -0
  634. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +100 -0
  635. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +41 -0
  636. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/leaky_relu.comp +22 -0
  637. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp +27 -0
  638. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp +48 -0
  639. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +169 -0
  640. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp +118 -0
  641. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_m.comp +82 -0
  642. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp +79 -0
  643. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_s.comp +90 -0
  644. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xs.comp +87 -0
  645. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xxs.comp +87 -0
  646. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_s.comp +90 -0
  647. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_xxs.comp +88 -0
  648. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +118 -0
  649. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp +154 -0
  650. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +130 -0
  651. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +132 -0
  652. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +136 -0
  653. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +167 -0
  654. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +130 -0
  655. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +868 -0
  656. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +441 -0
  657. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +442 -0
  658. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.comp +99 -0
  659. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/norm.comp +44 -0
  660. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_adamw.comp +42 -0
  661. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +28 -0
  662. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pool2d.comp +74 -0
  663. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/quantize_q8_1.comp +77 -0
  664. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/relu.comp +21 -0
  665. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp +26 -0
  666. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat_back.comp +37 -0
  667. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +52 -0
  668. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_back.comp +55 -0
  669. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp +58 -0
  670. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +60 -0
  671. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +43 -0
  672. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +43 -0
  673. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_vision.comp +47 -0
  674. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +24 -0
  675. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sigmoid.comp +20 -0
  676. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu.comp +22 -0
  677. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu_back.comp +26 -0
  678. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp +17 -0
  679. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +173 -0
  680. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_back.comp +50 -0
  681. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/square.comp +17 -0
  682. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sub.comp +29 -0
  683. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp +37 -0
  684. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp +20 -0
  685. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_bfloat16_support.comp +7 -0
  686. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat2_support.comp +7 -0
  687. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp +7 -0
  688. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_integer_dot_support.comp +7 -0
  689. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp +41 -0
  690. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/types.comp +1373 -0
  691. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +36 -0
  692. data/ext/{ggml → sources/ggml}/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +193 -35
  693. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/wkv6.comp +87 -0
  694. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/wkv7.comp +91 -0
  695. data/ext/{ggml → sources/ggml}/src/ggml.c +676 -1820
  696. data/ext/sources/ggml/src/gguf.cpp +1330 -0
  697. data/ext/{include → sources/include}/whisper.h +68 -2
  698. data/ext/sources/src/CMakeLists.txt +143 -0
  699. data/ext/{src → sources/src}/coreml/whisper-decoder-impl.h +27 -15
  700. data/ext/{src → sources/src}/coreml/whisper-decoder-impl.m +35 -10
  701. data/ext/{src → sources/src}/coreml/whisper-encoder-impl.h +21 -9
  702. data/ext/{src → sources/src}/coreml/whisper-encoder-impl.m +28 -3
  703. data/ext/sources/src/coreml/whisper-encoder.mm +73 -0
  704. data/ext/sources/src/whisper-arch.h +197 -0
  705. data/ext/{src → sources/src}/whisper.cpp +1905 -374
  706. data/ext/sources/tests/CMakeLists.txt +105 -0
  707. data/ext/sources/tests/earnings21/eval.mk +58 -0
  708. data/ext/sources/tests/earnings21/eval.py +68 -0
  709. data/ext/sources/tests/earnings21/normalizers/__init__.py +2 -0
  710. data/ext/sources/tests/earnings21/normalizers/basic.py +80 -0
  711. data/ext/sources/tests/earnings21/normalizers/english.json +1741 -0
  712. data/ext/sources/tests/earnings21/normalizers/english.py +550 -0
  713. data/ext/sources/tests/earnings21/requirements.txt +6 -0
  714. data/ext/sources/tests/en-0-ref.txt +1 -0
  715. data/ext/sources/tests/en-1-ref.txt +1 -0
  716. data/ext/sources/tests/en-2-ref.txt +1 -0
  717. data/ext/sources/tests/es-0-ref.txt +1 -0
  718. data/ext/sources/tests/librispeech/eval.mk +39 -0
  719. data/ext/sources/tests/librispeech/eval.py +47 -0
  720. data/ext/sources/tests/librispeech/normalizers/__init__.py +2 -0
  721. data/ext/sources/tests/librispeech/normalizers/basic.py +80 -0
  722. data/ext/sources/tests/librispeech/normalizers/english.json +1741 -0
  723. data/ext/sources/tests/librispeech/normalizers/english.py +550 -0
  724. data/ext/sources/tests/librispeech/requirements.txt +6 -0
  725. data/ext/sources/tests/run-tests.sh +130 -0
  726. data/ext/sources/tests/test-c.c +3 -0
  727. data/ext/sources/tests/test-vad-full.cpp +54 -0
  728. data/ext/sources/tests/test-vad.cpp +83 -0
  729. data/ext/sources/tests/test-whisper.js +58 -0
  730. data/extsources.rb +33 -5
  731. data/lib/whisper/model/uri.rb +149 -128
  732. data/sig/whisper.rbs +480 -0
  733. data/tests/helper.rb +28 -0
  734. data/tests/test_callback.rb +45 -3
  735. data/tests/test_error.rb +2 -2
  736. data/tests/test_model.rb +38 -0
  737. data/tests/test_package.rb +18 -3
  738. data/tests/test_params.rb +145 -8
  739. data/tests/test_segment.rb +10 -19
  740. data/tests/test_vad.rb +19 -0
  741. data/tests/test_vad_params.rb +103 -0
  742. data/tests/test_whisper.rb +37 -37
  743. data/whispercpp.gemspec +5 -4
  744. metadata +766 -111
  745. data/ext/cpu.mk +0 -9
  746. data/ext/examples/dr_wav.h +0 -8815
  747. data/ext/ggml/src/ggml-cann/aclnn_ops.h +0 -592
  748. data/ext/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +0 -4262
  749. data/ext/ggml/src/ggml-cpu/ggml-cpu.c +0 -14123
  750. data/ext/ggml/src/ggml-cpu/llamafile/sgemm.cpp +0 -1884
  751. data/ext/ggml/src/ggml-cpu/llamafile/sgemm.h +0 -14
  752. data/ext/ggml/src/ggml-metal/ggml-metal-impl.h +0 -288
  753. data/ext/ggml/src/ggml-sycl/element_wise.cpp +0 -1030
  754. data/ext/ggml/src/ggml-sycl/im2col.cpp +0 -126
  755. data/ext/ggml/src/ggml-sycl/rope.cpp +0 -276
  756. data/ext/ggml/src/ggml-sycl/wkv6.cpp +0 -141
  757. data/ext/metal-embed.mk +0 -17
  758. data/ext/metal.mk +0 -6
  759. data/ext/ruby_whisper.cpp +0 -1909
  760. data/ext/scripts/get-flags.mk +0 -38
  761. data/lib/whisper.rb +0 -2
  762. /data/ext/{ggml → sources/ggml}/include/ggml-blas.h +0 -0
  763. /data/ext/{ggml → sources/ggml}/include/ggml-cann.h +0 -0
  764. /data/ext/{ggml → sources/ggml}/include/ggml-cuda.h +0 -0
  765. /data/ext/{ggml → sources/ggml}/include/ggml-kompute.h +0 -0
  766. /data/ext/{ggml → sources/ggml}/include/ggml-opencl.h +0 -0
  767. /data/ext/{ggml → sources/ggml}/include/ggml-sycl.h +0 -0
  768. /data/ext/{ggml → sources/ggml}/src/ggml-amx/common.h +0 -0
  769. /data/ext/{ggml → sources/ggml}/src/ggml-amx/ggml-amx.cpp +0 -0
  770. /data/ext/{ggml → sources/ggml}/src/ggml-amx/mmq.cpp +0 -0
  771. /data/ext/{ggml → sources/ggml}/src/ggml-amx/mmq.h +0 -0
  772. /data/ext/{ggml → sources/ggml}/src/ggml-blas/ggml-blas.cpp +0 -0
  773. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/ascendc_kernels.h +0 -0
  774. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_f16.cpp +0 -0
  775. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_f32.cpp +0 -0
  776. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_q4_0.cpp +0 -0
  777. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_q8_0.cpp +0 -0
  778. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +0 -0
  779. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +0 -0
  780. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +0 -0
  781. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/amx.h +0 -0
  782. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/common.h +0 -0
  783. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/mmq.cpp +0 -0
  784. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/mmq.h +0 -0
  785. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-aarch64.h +0 -0
  786. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-hbm.cpp +0 -0
  787. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-hbm.h +0 -0
  788. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-quants.h +0 -0
  789. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-traits.cpp +0 -0
  790. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-traits.h +0 -0
  791. /data/ext/{ggml → sources/ggml}/src/ggml-kompute/ggml-kompute.cpp +0 -0
  792. /data/ext/{ggml → sources/ggml}/src/ggml-quants.h +0 -0
  793. /data/ext/{ggml → sources/ggml}/src/ggml-threading.cpp +0 -0
  794. /data/ext/{ggml → sources/ggml}/src/ggml-threading.h +0 -0
  795. /data/ext/{src → sources/src}/coreml/whisper-encoder.h +0 -0
  796. /data/ext/{src → sources/src}/openvino/whisper-openvino-encoder.cpp +0 -0
  797. /data/ext/{src → sources/src}/openvino/whisper-openvino-encoder.h +0 -0
@@ -0,0 +1,3510 @@
1
+ #define _CRT_SECURE_NO_DEPRECATE // Disables "unsafe" warnings on Windows
2
+ #define _USE_MATH_DEFINES // For M_PI on MSVC
3
+
4
+ #include "ggml-backend-impl.h"
5
+ #include "ggml-backend.h"
6
+ #include "ggml-cpu-traits.h"
7
+ #include "ggml-cpu-impl.h"
8
+ #include "ggml-cpu.h"
9
+ #include "ggml-impl.h"
10
+ #include "ggml-cpu-quants.h"
11
+ #include "ggml-threading.h"
12
+ #include "unary-ops.h"
13
+ #include "binary-ops.h"
14
+ #include "vec.h"
15
+ #include "ops.h"
16
+ #include "ggml.h"
17
+
18
+ #if defined(_MSC_VER) || defined(__MINGW32__)
19
+ #include <malloc.h> // using malloc.h with MSC/MINGW
20
+ #elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
21
+ #include <alloca.h>
22
+ #endif
23
+
24
+ #include <assert.h>
25
+ #include <errno.h>
26
+ #include <time.h>
27
+ #include <math.h>
28
+ #include <stdlib.h>
29
+ #include <string.h>
30
+ #include <stdint.h>
31
+ #include <inttypes.h>
32
+ #include <stdio.h>
33
+ #include <float.h>
34
+ #include <limits.h>
35
+ #include <stdarg.h>
36
+ #include <signal.h>
37
+ #if defined(__gnu_linux__)
38
+ #include <syscall.h>
39
+ #endif
40
+
41
+ #ifdef GGML_USE_OPENMP
42
+ #include <omp.h>
43
+ #endif
44
+
45
+ #if defined(__ARM_FEATURE_SVE) || defined(__ARM_FEATURE_MATMUL_INT8)
46
+ #undef GGML_USE_LLAMAFILE
47
+ #endif
48
+
49
+ #ifdef GGML_USE_LLAMAFILE
50
+ #include "llamafile/sgemm.h"
51
+ #endif
52
+
53
+ // Note: once we move threading into a separate C++ file,
54
+ // we'll use std::hardware_destructive_interference_size instead of hardcoding it here,
55
+ // and we'll use C++ attribute syntax.
56
+ #define GGML_CACHE_LINE 64
57
+
58
+ #if defined(__clang__) || defined(__GNUC__)
59
+ #define GGML_CACHE_ALIGN __attribute__((aligned(GGML_CACHE_LINE)))
60
+ #endif
61
+
62
+ #if defined(__has_feature)
63
+ #if __has_feature(thread_sanitizer)
64
+ #define GGML_TSAN_ENABLED 1
65
+ #endif
66
+ #else // __has_feature
67
+ #if defined(__SANITIZE_THREAD__)
68
+ #define GGML_TSAN_ENABLED 1
69
+ #endif
70
+ #endif // __has_feature
71
+
72
+ #define UNUSED GGML_UNUSED
73
+ #define SWAP(x, y, T) do { T SWAP = x; (x) = y; (y) = SWAP; } while (0)
74
+
75
+ #if defined(__ARM_ARCH)
76
+ struct ggml_arm_arch_features_type {
77
+ int has_neon;
78
+ int has_dotprod;
79
+ int has_i8mm;
80
+ int has_sve;
81
+ int sve_cnt;
82
+ int has_sme;
83
+ } ggml_arm_arch_features = {-1, -1, -1, -1, 0, -1};
84
+ #endif
85
+
86
+
87
+ #if defined(_WIN32)
88
+
89
+ #define WIN32_LEAN_AND_MEAN
90
+ #ifndef NOMINMAX
91
+ #define NOMINMAX
92
+ #endif
93
+ #include <windows.h>
94
+
95
+ #if defined(_MSC_VER) && !defined(__clang__)
96
+ #define GGML_CACHE_ALIGN __declspec(align(GGML_CACHE_LINE))
97
+
98
+ typedef volatile LONG atomic_int;
99
+ typedef atomic_int atomic_bool;
100
+ typedef atomic_int atomic_flag;
101
+
102
+ #define ATOMIC_FLAG_INIT 0
103
+
104
+ typedef enum {
105
+ memory_order_relaxed,
106
+ memory_order_consume,
107
+ memory_order_acquire,
108
+ memory_order_release,
109
+ memory_order_acq_rel,
110
+ memory_order_seq_cst
111
+ } memory_order;
112
+
113
+ static void atomic_store(atomic_int * ptr, LONG val) {
114
+ InterlockedExchange(ptr, val);
115
+ }
116
+ static void atomic_store_explicit(atomic_int * ptr, LONG val, memory_order mo) {
117
+ // TODO: add support for explicit memory order
118
+ InterlockedExchange(ptr, val);
119
+ }
120
+ static LONG atomic_load(atomic_int * ptr) {
121
+ return InterlockedCompareExchange(ptr, 0, 0);
122
+ }
123
+ static LONG atomic_load_explicit(atomic_int * ptr, memory_order mo) {
124
+ // TODO: add support for explicit memory order
125
+ return InterlockedCompareExchange(ptr, 0, 0);
126
+ }
127
+ static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) {
128
+ return InterlockedExchangeAdd(ptr, inc);
129
+ }
130
+ static LONG atomic_fetch_add_explicit(atomic_int * ptr, LONG inc, memory_order mo) {
131
+ // TODO: add support for explicit memory order
132
+ return InterlockedExchangeAdd(ptr, inc);
133
+ }
134
+ static atomic_bool atomic_flag_test_and_set(atomic_flag * ptr) {
135
+ return InterlockedExchange(ptr, 1);
136
+ }
137
+ static void atomic_flag_clear(atomic_flag * ptr) {
138
+ InterlockedExchange(ptr, 0);
139
+ }
140
+ static void atomic_thread_fence(memory_order mo) {
141
+ MemoryBarrier();
142
+ }
143
+ #else // clang
144
+ #include <stdatomic.h>
145
+ #endif
146
+
147
+ typedef HANDLE pthread_t;
148
+
149
+ typedef DWORD thread_ret_t;
150
+ static int pthread_create(pthread_t * out, void * unused, thread_ret_t(*func)(void *), void * arg) {
151
+ (void) unused;
152
+ HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL);
153
+ if (handle == NULL)
154
+ {
155
+ return EAGAIN;
156
+ }
157
+
158
+ *out = handle;
159
+ return 0;
160
+ }
161
+
162
+ static int pthread_join(pthread_t thread, void * unused) {
163
+ (void) unused;
164
+ int ret = (int) WaitForSingleObject(thread, INFINITE);
165
+ CloseHandle(thread);
166
+ return ret;
167
+ }
168
+
169
+ static int sched_yield (void) {
170
+ Sleep (0);
171
+ return 0;
172
+ }
173
+ #else
174
+
175
+ #include <pthread.h>
176
+ #include <stdatomic.h>
177
+ #include <sched.h>
178
+ #if defined(__FreeBSD__)
179
+ #include <pthread_np.h>
180
+ #endif
181
+
182
+ typedef void * thread_ret_t;
183
+
184
+ #include <sys/types.h>
185
+ #include <sys/stat.h>
186
+ #include <unistd.h>
187
+
188
+ #endif
189
+
190
+ typedef pthread_t ggml_thread_t;
191
+
192
+ #if defined(__APPLE__)
193
+ #include <unistd.h>
194
+ #include <mach/mach.h>
195
+ #include <TargetConditionals.h>
196
+ #endif
197
+
198
+ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = {
199
+ [GGML_TYPE_F32] = {
200
+ .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32,
201
+ .vec_dot_type = GGML_TYPE_F32,
202
+ .nrows = 1,
203
+ },
204
+ [GGML_TYPE_F16] = {
205
+ .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_fp16,
206
+ .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f16,
207
+ .vec_dot_type = GGML_TYPE_F16,
208
+ .nrows = 1,
209
+ },
210
+ [GGML_TYPE_Q4_0] = {
211
+ .from_float = quantize_row_q4_0,
212
+ .vec_dot = ggml_vec_dot_q4_0_q8_0,
213
+ .vec_dot_type = GGML_TYPE_Q8_0,
214
+ #if defined (__ARM_FEATURE_MATMUL_INT8)
215
+ .nrows = 2,
216
+ #else
217
+ .nrows = 1,
218
+ #endif
219
+ },
220
+ [GGML_TYPE_Q4_1] = {
221
+ .from_float = quantize_row_q4_1,
222
+ .vec_dot = ggml_vec_dot_q4_1_q8_1,
223
+ .vec_dot_type = GGML_TYPE_Q8_1,
224
+ #if defined (__ARM_FEATURE_MATMUL_INT8)
225
+ .nrows = 2,
226
+ #else
227
+ .nrows = 1,
228
+ #endif
229
+ },
230
+ [GGML_TYPE_Q5_0] = {
231
+ .from_float = quantize_row_q5_0,
232
+ .vec_dot = ggml_vec_dot_q5_0_q8_0,
233
+ .vec_dot_type = GGML_TYPE_Q8_0,
234
+ .nrows = 1,
235
+ },
236
+ [GGML_TYPE_Q5_1] = {
237
+ .from_float = quantize_row_q5_1,
238
+ .vec_dot = ggml_vec_dot_q5_1_q8_1,
239
+ .vec_dot_type = GGML_TYPE_Q8_1,
240
+ .nrows = 1,
241
+ },
242
+ [GGML_TYPE_Q8_0] = {
243
+ .from_float = quantize_row_q8_0,
244
+ .vec_dot = ggml_vec_dot_q8_0_q8_0,
245
+ .vec_dot_type = GGML_TYPE_Q8_0,
246
+ #if defined (__ARM_FEATURE_MATMUL_INT8)
247
+ .nrows = 2,
248
+ #else
249
+ .nrows = 1,
250
+ #endif
251
+ },
252
+ [GGML_TYPE_Q8_1] = {
253
+ .from_float = quantize_row_q8_1,
254
+ .vec_dot_type = GGML_TYPE_Q8_1,
255
+ .nrows = 1,
256
+ },
257
+ [GGML_TYPE_Q2_K] = {
258
+ .from_float = quantize_row_q2_K,
259
+ .vec_dot = ggml_vec_dot_q2_K_q8_K,
260
+ .vec_dot_type = GGML_TYPE_Q8_K,
261
+ .nrows = 1,
262
+ },
263
+ [GGML_TYPE_Q3_K] = {
264
+ .from_float = quantize_row_q3_K,
265
+ .vec_dot = ggml_vec_dot_q3_K_q8_K,
266
+ .vec_dot_type = GGML_TYPE_Q8_K,
267
+ .nrows = 1,
268
+ },
269
+ [GGML_TYPE_Q4_K] = {
270
+ .from_float = quantize_row_q4_K,
271
+ .vec_dot = ggml_vec_dot_q4_K_q8_K,
272
+ .vec_dot_type = GGML_TYPE_Q8_K,
273
+ .nrows = 1,
274
+ },
275
+ [GGML_TYPE_Q5_K] = {
276
+ .from_float = quantize_row_q5_K,
277
+ .vec_dot = ggml_vec_dot_q5_K_q8_K,
278
+ .vec_dot_type = GGML_TYPE_Q8_K,
279
+ .nrows = 1,
280
+ },
281
+ [GGML_TYPE_Q6_K] = {
282
+ .from_float = quantize_row_q6_K,
283
+ .vec_dot = ggml_vec_dot_q6_K_q8_K,
284
+ .vec_dot_type = GGML_TYPE_Q8_K,
285
+ #if defined (__ARM_FEATURE_MATMUL_INT8)
286
+ .nrows = 2,
287
+ #else
288
+ .nrows = 1,
289
+ #endif
290
+ },
291
+ [GGML_TYPE_IQ2_XXS] = {
292
+ .from_float = NULL,
293
+ .vec_dot = ggml_vec_dot_iq2_xxs_q8_K,
294
+ .vec_dot_type = GGML_TYPE_Q8_K,
295
+ .nrows = 1,
296
+ },
297
+ [GGML_TYPE_IQ2_XS] = {
298
+ .from_float = NULL,
299
+ .vec_dot = ggml_vec_dot_iq2_xs_q8_K,
300
+ .vec_dot_type = GGML_TYPE_Q8_K,
301
+ .nrows = 1,
302
+ },
303
+ [GGML_TYPE_IQ3_XXS] = {
304
+ // NOTE: from_float for iq3 and iq2_s was removed because these quants require initialization in ggml_quantize_init
305
+ //.from_float = quantize_row_iq3_xxs,
306
+ .vec_dot = ggml_vec_dot_iq3_xxs_q8_K,
307
+ .vec_dot_type = GGML_TYPE_Q8_K,
308
+ .nrows = 1,
309
+ },
310
+ [GGML_TYPE_IQ3_S] = {
311
+ //.from_float = quantize_row_iq3_s,
312
+ .vec_dot = ggml_vec_dot_iq3_s_q8_K,
313
+ .vec_dot_type = GGML_TYPE_Q8_K,
314
+ .nrows = 1,
315
+ },
316
+ [GGML_TYPE_IQ2_S] = {
317
+ //.from_float = quantize_row_iq2_s,
318
+ .vec_dot = ggml_vec_dot_iq2_s_q8_K,
319
+ .vec_dot_type = GGML_TYPE_Q8_K,
320
+ .nrows = 1,
321
+ },
322
+ [GGML_TYPE_IQ1_S] = {
323
+ .from_float = NULL,
324
+ .vec_dot = ggml_vec_dot_iq1_s_q8_K,
325
+ .vec_dot_type = GGML_TYPE_Q8_K,
326
+ .nrows = 1,
327
+ },
328
+ [GGML_TYPE_IQ1_M] = {
329
+ .from_float = NULL,
330
+ .vec_dot = ggml_vec_dot_iq1_m_q8_K,
331
+ .vec_dot_type = GGML_TYPE_Q8_K,
332
+ .nrows = 1,
333
+ },
334
+ [GGML_TYPE_IQ4_NL] = {
335
+ .from_float = quantize_row_iq4_nl,
336
+ .vec_dot = ggml_vec_dot_iq4_nl_q8_0,
337
+ .vec_dot_type = GGML_TYPE_Q8_0,
338
+ .nrows = 1,
339
+ },
340
+ [GGML_TYPE_IQ4_XS] = {
341
+ .from_float = quantize_row_iq4_xs,
342
+ .vec_dot = ggml_vec_dot_iq4_xs_q8_K,
343
+ .vec_dot_type = GGML_TYPE_Q8_K,
344
+ .nrows = 1,
345
+ },
346
+ [GGML_TYPE_Q8_K] = {
347
+ .from_float = quantize_row_q8_K,
348
+ },
349
+ [GGML_TYPE_BF16] = {
350
+ .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_bf16,
351
+ .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_bf16,
352
+ .vec_dot_type = GGML_TYPE_BF16,
353
+ .nrows = 1,
354
+ },
355
+ [GGML_TYPE_TQ1_0] = {
356
+ .from_float = quantize_row_tq1_0,
357
+ .vec_dot = ggml_vec_dot_tq1_0_q8_K,
358
+ .vec_dot_type = GGML_TYPE_Q8_K,
359
+ .nrows = 1,
360
+ },
361
+ [GGML_TYPE_TQ2_0] = {
362
+ .from_float = quantize_row_tq2_0,
363
+ .vec_dot = ggml_vec_dot_tq2_0_q8_K,
364
+ .vec_dot_type = GGML_TYPE_Q8_K,
365
+ .nrows = 1,
366
+ },
367
+ };
368
+
369
+ const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type) {
370
+ return &type_traits_cpu[type];
371
+ }
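The table above binds each tensor type to its dot-product kernel (vec_dot), the format the right-hand operand must be converted to first (vec_dot_type), and how many rows the kernel consumes per call (nrows). A minimal sketch of the resulting lookup-quantize-dot pattern, condensed from ggml_compute_forward_mul_mat further down; toy_dot_row is illustrative, not part of the diff, and assumes quant_buf holds ggml_row_size(vec_dot_type, ne) bytes:

    // Sketch only: driving one row through the traits table.
    static float toy_dot_row(enum ggml_type t, const void * src0_row,
                             const float * src1_f32, int64_t ne, void * quant_buf) {
        const struct ggml_type_traits_cpu * tt  = ggml_get_type_traits_cpu(t);
        const struct ggml_type_traits_cpu * vdt = ggml_get_type_traits_cpu(tt->vec_dot_type);

        const void * rhs = src1_f32;
        if (tt->vec_dot_type != GGML_TYPE_F32) {
            // quantize the f32 activations into the kernel's expected format,
            // e.g. q8_0 activations for q4_0 weights
            vdt->from_float(src1_f32, quant_buf, ne);
            rhs = quant_buf;
        }

        float out = 0.0f;
        tt->vec_dot((int) ne, &out, 0, src0_row, 0, rhs, 0, 1);
        return out;
    }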
372
+
373
+ //
374
+ // Threading defs
375
+ //
376
+
377
+ typedef pthread_t ggml_thread_t;
378
+
379
+ #if defined(_WIN32)
380
+
381
+ typedef CONDITION_VARIABLE ggml_cond_t;
382
+ typedef SRWLOCK ggml_mutex_t;
383
+
384
+ #define ggml_mutex_init(m) InitializeSRWLock(m)
385
+ #define ggml_mutex_destroy(m)
386
+ #define ggml_mutex_lock(m) AcquireSRWLockExclusive(m)
387
+ #define ggml_mutex_unlock(m) ReleaseSRWLockExclusive(m)
388
+ #define ggml_mutex_lock_shared(m) AcquireSRWLockShared(m)
389
+ #define ggml_mutex_unlock_shared(m) ReleaseSRWLockShared(m)
390
+
391
+ #define ggml_cond_init(c) InitializeConditionVariable(c)
392
+ #define ggml_cond_destroy(c)
393
+ #define ggml_cond_wait(c, m) SleepConditionVariableSRW(c, m, INFINITE, CONDITION_VARIABLE_LOCKMODE_SHARED)
394
+ #define ggml_cond_broadcast(c) WakeAllConditionVariable(c)
395
+
396
+ #define ggml_thread_create pthread_create
397
+ #define ggml_thread_join pthread_join
398
+
399
+ #else
400
+
401
+ typedef pthread_cond_t ggml_cond_t;
402
+ typedef pthread_mutex_t ggml_mutex_t;
403
+
404
+ #define ggml_mutex_init(m) pthread_mutex_init(m, NULL)
405
+ #define ggml_mutex_destroy(m) pthread_mutex_destroy(m)
406
+ #define ggml_mutex_lock(m) pthread_mutex_lock(m)
407
+ #define ggml_mutex_unlock(m) pthread_mutex_unlock(m)
408
+ #define ggml_mutex_lock_shared(m) pthread_mutex_lock(m)
409
+ #define ggml_mutex_unlock_shared(m) pthread_mutex_unlock(m)
410
+
411
+ #define ggml_lock_init(x) UNUSED(x)
412
+ #define ggml_lock_destroy(x) UNUSED(x)
413
+ #if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
414
+ #define ggml_lock_lock(x) _mm_pause()
415
+ #else
416
+ #define ggml_lock_lock(x) UNUSED(x)
417
+ #endif
418
+ #define ggml_lock_unlock(x) UNUSED(x)
419
+
420
+ #define GGML_LOCK_INITIALIZER 0
421
+ #define ggml_cond_init(c) pthread_cond_init(c, NULL)
422
+ #define ggml_cond_destroy(c) pthread_cond_destroy(c)
423
+ #define ggml_cond_wait(c, m) pthread_cond_wait(c, m)
424
+ #define ggml_cond_broadcast(c) pthread_cond_broadcast(c)
425
+
426
+ #define ggml_thread_create pthread_create
427
+ #define ggml_thread_join pthread_join
428
+
429
+ #endif
430
+
431
+ // Threadpool def
432
+ struct ggml_threadpool {
433
+ ggml_mutex_t mutex; // mutex for cond.var
434
+ ggml_cond_t cond; // cond.var for waiting for new work
435
+
436
+ struct ggml_cgraph * cgraph;
437
+ struct ggml_cplan * cplan;
438
+
439
+ // synchronization primitives
440
+ atomic_int n_graph; // incremented when there is work to be done (i.e. each graph)
441
+ atomic_int GGML_CACHE_ALIGN n_barrier;
442
+ atomic_int GGML_CACHE_ALIGN n_barrier_passed;
443
+ atomic_int GGML_CACHE_ALIGN current_chunk; // currently processing chunk during Mat_Mul, shared between all the threads.
444
+
445
+ // these are atomic as an annotation for thread-sanitizer
446
+ atomic_bool stop; // Used for stopping the threadpool altogether
447
+ atomic_bool pause; // Used for pausing the threadpool or individual threads
448
+ atomic_int abort; // Used for aborting processing of a graph
449
+
450
+ struct ggml_compute_state * workers; // per thread state
451
+ int n_threads_max; // number of threads in the pool
452
+ atomic_int n_threads_cur; // number of threads used in the current graph
453
+
454
+ int32_t prio; // Scheduling priority
455
+ uint32_t poll; // Polling level (0 - no polling)
456
+
457
+ enum ggml_status ec;
458
+ };
459
+
460
+ // Per-thread state
461
+ struct ggml_compute_state {
462
+ #ifndef GGML_USE_OPENMP
463
+ ggml_thread_t thrd;
464
+ bool cpumask[GGML_MAX_N_THREADS];
465
+ int last_graph;
466
+ bool pending;
467
+ #endif
468
+ struct ggml_threadpool * threadpool;
469
+ int ith;
470
+ };
471
+
472
+ // Helpers for polling loops
473
+ #if defined(__aarch64__) && ( defined(__clang__) || defined(__GNUC__) )
474
+ static inline void ggml_thread_cpu_relax(void) {
475
+ __asm__ volatile("yield" ::: "memory");
476
+ }
477
+ #elif defined(__x86_64__)
478
+ static inline void ggml_thread_cpu_relax(void) {
479
+ _mm_pause();
480
+ }
481
+ #else
482
+ static inline void ggml_thread_cpu_relax(void) {;}
483
+ #endif
484
+
485
+ //
486
+ // NUMA support
487
+ //
488
+
489
+ #define GGML_NUMA_MAX_NODES 8
490
+ #define GGML_NUMA_MAX_CPUS 512
491
+
492
+ struct ggml_numa_node {
493
+ uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node
494
+ uint32_t n_cpus;
495
+ };
496
+
497
+ struct ggml_numa_nodes {
498
+ enum ggml_numa_strategy numa_strategy;
499
+ struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
500
+ uint32_t n_nodes;
501
+ uint32_t total_cpus; // hardware threads on system
502
+ uint32_t current_node; // node on which main process is executing
503
+ #if defined(__gnu_linux__)
504
+ cpu_set_t cpuset; // cpuset from numactl
505
+ #else
506
+ uint32_t cpuset; // no NUMA support outside of Linux at this time. Use a portable datatype
507
+ #endif
508
+ };
509
+
510
+ //
511
+ // ggml state
512
+ //
513
+
514
+ struct ggml_state {
515
+ struct ggml_numa_nodes numa;
516
+ };
517
+
518
+ static struct ggml_state g_state = {0};
519
+
520
+ void ggml_barrier(struct ggml_threadpool * tp) {
521
+ int n_threads = atomic_load_explicit(&tp->n_threads_cur, memory_order_relaxed);
522
+ if (n_threads == 1) {
523
+ return;
524
+ }
525
+
526
+ #ifdef GGML_USE_OPENMP
527
+ #pragma omp barrier
528
+ #else
529
+ int n_passed = atomic_load_explicit(&tp->n_barrier_passed, memory_order_relaxed);
530
+
531
+ // enter barrier (full seq-cst fence)
532
+ int n_barrier = atomic_fetch_add_explicit(&tp->n_barrier, 1, memory_order_seq_cst);
533
+
534
+ if (n_barrier == (n_threads - 1)) {
535
+ // last thread
536
+ atomic_store_explicit(&tp->n_barrier, 0, memory_order_relaxed);
537
+
538
+ // exit barrier (full seq-cst fence)
539
+ atomic_fetch_add_explicit(&tp->n_barrier_passed, 1, memory_order_seq_cst);
540
+ return;
541
+ }
542
+
543
+ // wait for other threads
544
+ while (atomic_load_explicit(&tp->n_barrier_passed, memory_order_relaxed) == n_passed) {
545
+ ggml_thread_cpu_relax();
546
+ }
547
+
548
+ // exit barrier (full seq-cst fence)
549
+ // TSAN doesn't support standalone fences yet, so we use a dummy read-modify-write instead
550
+ #ifdef GGML_TSAN_ENABLED
551
+ atomic_fetch_add_explicit(&tp->n_barrier_passed, 0, memory_order_seq_cst);
552
+ #else
553
+ atomic_thread_fence(memory_order_seq_cst);
554
+ #endif
555
+ #endif
556
+ }
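The barrier above is a counter-plus-generation barrier: arrivals are counted in n_barrier, but waiters spin on the n_barrier_passed generation rather than on the arrival count, so the last thread can reset n_barrier for the next round without releasing anyone early. A self-contained sketch of the same shape in plain C11 atomics; the toy_* names are illustrative:

    #include <stdatomic.h>

    typedef struct {
        atomic_int arrived; // threads that have entered the current round
        atomic_int passed;  // generation counter, bumped once per round
        int        n;       // number of participating threads
    } toy_barrier;

    static void toy_barrier_wait(toy_barrier * b) {
        int gen = atomic_load_explicit(&b->passed, memory_order_relaxed);

        if (atomic_fetch_add_explicit(&b->arrived, 1, memory_order_seq_cst) == b->n - 1) {
            // last thread in: reset the arrival count, then release the others
            atomic_store_explicit(&b->arrived, 0, memory_order_relaxed);
            atomic_fetch_add_explicit(&b->passed, 1, memory_order_seq_cst);
            return;
        }

        // everyone else spins until the generation changes
        while (atomic_load_explicit(&b->passed, memory_order_relaxed) == gen) {
            // spin; a real implementation inserts a cpu-relax hint here
        }
        atomic_thread_fence(memory_order_seq_cst); // pairs with the releasing add
    }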
557
+
558
+ #if defined(__gnu_linux__)
559
+ static cpu_set_t ggml_get_numa_affinity(void) {
560
+ cpu_set_t cpuset;
561
+ pthread_t thread;
562
+ thread = pthread_self();
563
+ CPU_ZERO(&cpuset);
564
+ pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
565
+ return cpuset;
566
+ }
567
+ #else
568
+ static uint32_t ggml_get_numa_affinity(void) {
569
+ return 0; // no NUMA support
570
+ }
571
+ #endif
572
+
573
+ void ggml_numa_init(enum ggml_numa_strategy numa_flag) {
574
+ if (g_state.numa.n_nodes > 0) {
575
+ fprintf(stderr, "ggml_numa_init: NUMA already initialized\n");
576
+
577
+ return;
578
+ }
579
+
580
+ #if defined(__gnu_linux__)
581
+ struct stat st;
582
+ char path[256];
583
+ int rv;
584
+
585
+ // set numa scheme
586
+ g_state.numa.numa_strategy = numa_flag;
587
+
588
+ GGML_PRINT_DEBUG("numa strategy %u\n",g_state.numa.numa_strategy);
589
+
590
+ g_state.numa.cpuset = ggml_get_numa_affinity();
591
+
592
+ // enumerate nodes
593
+ while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
594
+ rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
595
+ GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
596
+ if (stat(path, &st) != 0) { break; }
597
+ ++g_state.numa.n_nodes;
598
+ }
599
+
600
+ // enumerate CPUs
601
+ while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) {
602
+ rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus);
603
+ GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
604
+ if (stat(path, &st) != 0) { break; }
605
+ ++g_state.numa.total_cpus;
606
+ }
607
+
608
+ GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus);
609
+
610
+ // figure out which node we're on
611
+ uint current_cpu;
612
+ int getcpu_ret = 0;
613
+ #if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 33) || defined(__COSMOPOLITAN__)
614
+ getcpu_ret = getcpu(&current_cpu, &g_state.numa.current_node);
615
+ #else
616
+ // old glibc doesn't have a wrapper for this call. Fall back on direct syscall
617
+ # if !defined(SYS_getcpu) && defined(SYS_get_cpu)
618
+ # define SYS_getcpu SYS_get_cpu // some older glibc versions use this name
619
+ # endif
620
+ getcpu_ret = syscall(SYS_getcpu, &current_cpu, &g_state.numa.current_node);
621
+ #endif
622
+
623
+ if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1 || getcpu_ret != 0) {
624
+ g_state.numa.n_nodes = 0;
625
+ return;
626
+ }
627
+
628
+ GGML_PRINT_DEBUG("found our process on numa node %u, CPU %u\n", g_state.numa.current_node, current_cpu);
629
+
630
+ for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) {
631
+ struct ggml_numa_node * node = &g_state.numa.nodes[n];
632
+ GGML_PRINT_DEBUG("CPUs on node %u:", n);
633
+ node->n_cpus = 0;
634
+ for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) {
635
+ rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c);
636
+ GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
637
+ if (stat(path, &st) == 0) {
638
+ node->cpus[node->n_cpus++] = c;
639
+ GGML_PRINT_DEBUG(" %u", c);
640
+ }
641
+ }
642
+ GGML_PRINT_DEBUG("\n");
643
+ }
644
+
645
+ if (ggml_is_numa()) {
646
+ FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r");
647
+ if (fptr != NULL) {
648
+ char buf[42];
649
+ if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) {
650
+ GGML_LOG_WARN("/proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n");
651
+ }
652
+ fclose(fptr);
653
+ }
654
+ }
655
+ #else
656
+ UNUSED(numa_flag);
657
+ // TODO
658
+ #endif
659
+ }
660
+
661
+ bool ggml_is_numa(void) {
662
+ return g_state.numa.n_nodes > 1;
663
+ }
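Both enumeration loops in ggml_numa_init() use the same trick: stat() successive sysfs paths until one is missing. A standalone, Linux-only sketch of that probe (count_numa_nodes is an illustrative name):

    #include <stdio.h>
    #include <sys/stat.h>

    static unsigned count_numa_nodes(void) {
        unsigned n = 0;
        char path[256];
        struct stat st;
        while (n < 8) { // same cap as GGML_NUMA_MAX_NODES above
            snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", n);
            if (stat(path, &st) != 0) {
                break; // first missing node ends the enumeration
            }
            ++n;
        }
        return n;
    }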
664
+
665
+ #if defined(__ARM_ARCH)
666
+
667
+ #if defined(__linux__) && defined(__aarch64__)
668
+ #include <sys/auxv.h>
669
+ #elif defined(__APPLE__)
670
+ #include <sys/sysctl.h>
671
+ #endif
672
+
673
+ #if !defined(HWCAP2_I8MM)
674
+ #define HWCAP2_I8MM (1 << 13)
675
+ #endif
676
+
677
+ #if !defined(HWCAP2_SME)
678
+ #define HWCAP2_SME (1 << 23)
679
+ #endif
680
+
681
+ static void ggml_init_arm_arch_features(void) {
682
+ #if defined(__linux__) && defined(__aarch64__)
683
+ uint32_t hwcap = getauxval(AT_HWCAP);
684
+ uint32_t hwcap2 = getauxval(AT_HWCAP2);
685
+
686
+ ggml_arm_arch_features.has_neon = !!(hwcap & HWCAP_ASIMD);
687
+ ggml_arm_arch_features.has_dotprod = !!(hwcap & HWCAP_ASIMDDP);
688
+ ggml_arm_arch_features.has_i8mm = !!(hwcap2 & HWCAP2_I8MM);
689
+ ggml_arm_arch_features.has_sve = !!(hwcap & HWCAP_SVE);
690
+ ggml_arm_arch_features.has_sme = !!(hwcap2 & HWCAP2_SME);
691
+
692
+ #if defined(__ARM_FEATURE_SVE)
693
+ ggml_arm_arch_features.sve_cnt = PR_SVE_VL_LEN_MASK & prctl(PR_SVE_GET_VL);
694
+ #endif
695
+ #elif defined(__APPLE__)
696
+ int oldp = 0;
697
+ size_t size = sizeof(oldp);
698
+ if (sysctlbyname("hw.optional.AdvSIMD", &oldp, &size, NULL, 0) != 0) {
699
+ oldp = 0;
700
+ }
701
+ ggml_arm_arch_features.has_neon = oldp;
702
+
703
+ if (sysctlbyname("hw.optional.arm.FEAT_DotProd", &oldp, &size, NULL, 0) != 0) {
704
+ oldp = 0;
705
+ }
706
+ ggml_arm_arch_features.has_dotprod = oldp;
707
+
708
+ if (sysctlbyname("hw.optional.arm.FEAT_I8MM", &oldp, &size, NULL, 0) != 0) {
709
+ oldp = 0;
710
+ }
711
+ ggml_arm_arch_features.has_i8mm = oldp;
712
+
713
+ if (sysctlbyname("hw.optional.arm.FEAT_SME", &oldp, &size, NULL, 0) != 0) {
714
+ oldp = 0;
715
+ }
716
+ ggml_arm_arch_features.has_sme = oldp;
717
+
718
+ ggml_arm_arch_features.has_sve = 0;
719
+ ggml_arm_arch_features.sve_cnt = 0;
720
+ #else
721
+ // Run-time CPU feature detection not implemented for this platform; fall back to compile-time checks
722
+ #if defined(__ARM_NEON)
723
+ ggml_arm_arch_features.has_neon = 1;
724
+ #else
725
+ ggml_arm_arch_features.has_neon = 0;
726
+ #endif
727
+
728
+ #if defined(__ARM_FEATURE_MATMUL_INT8)
729
+ ggml_arm_arch_features.has_i8mm = 1;
730
+ #else
731
+ ggml_arm_arch_features.has_i8mm = 0;
732
+ #endif
733
+
734
+ #if defined(__ARM_FEATURE_SVE)
735
+ ggml_arm_arch_features.has_sve = 1;
736
+ ggml_arm_arch_features.sve_cnt = 16;
737
+ #else
738
+ ggml_arm_arch_features.has_sve = 0;
739
+ ggml_arm_arch_features.sve_cnt = 0;
740
+ #endif
741
+
742
+ #if defined(__ARM_FEATURE_SME) || defined(__ARM_FEATURE_SME2)
743
+ ggml_arm_arch_features.has_sme = 1;
744
+ #else
745
+ ggml_arm_arch_features.has_sme = 0;
746
+ #endif
747
+ #endif
748
+ }
749
+ #endif
750
+
751
+ struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
752
+ GGML_ASSERT(!ggml_get_no_alloc(ctx));
753
+
754
+ struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
755
+
756
+ ggml_set_i32(result, value);
757
+
758
+ return result;
759
+ }
760
+
761
+ struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
762
+ GGML_ASSERT(!ggml_get_no_alloc(ctx));
763
+
764
+ struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
765
+
766
+ ggml_set_f32(result, value);
767
+
768
+ return result;
769
+ }
770
+
771
+ struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) {
772
+ const int n = ggml_nrows(tensor);
773
+ const int nc = tensor->ne[0];
774
+ const size_t n1 = tensor->nb[1];
775
+
776
+ char * const data = tensor->data;
777
+
778
+ switch (tensor->type) {
779
+ case GGML_TYPE_I8:
780
+ {
781
+ assert(tensor->nb[0] == sizeof(int8_t));
782
+ for (int i = 0; i < n; i++) {
783
+ ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
784
+ }
785
+ } break;
786
+ case GGML_TYPE_I16:
787
+ {
788
+ assert(tensor->nb[0] == sizeof(int16_t));
789
+ for (int i = 0; i < n; i++) {
790
+ ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
791
+ }
792
+ } break;
793
+ case GGML_TYPE_I32:
794
+ {
795
+ assert(tensor->nb[0] == sizeof(int32_t));
796
+ for (int i = 0; i < n; i++) {
797
+ ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
798
+ }
799
+ } break;
800
+ case GGML_TYPE_F16:
801
+ {
802
+ assert(tensor->nb[0] == sizeof(ggml_fp16_t));
803
+ for (int i = 0; i < n; i++) {
804
+ ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
805
+ }
806
+ } break;
807
+ case GGML_TYPE_BF16:
808
+ {
809
+ assert(tensor->nb[0] == sizeof(ggml_fp16_t));
810
+ for (int i = 0; i < n; i++) {
811
+ ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value));
812
+ }
813
+ } break;
814
+ case GGML_TYPE_F32:
815
+ {
816
+ assert(tensor->nb[0] == sizeof(float));
817
+ for (int i = 0; i < n; i++) {
818
+ ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
819
+ }
820
+ } break;
821
+ default:
822
+ {
823
+ GGML_ABORT("fatal error");
824
+ }
825
+ }
826
+
827
+ return tensor;
828
+ }
829
+
830
+ struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
831
+ const int n = ggml_nrows(tensor);
832
+ const int nc = tensor->ne[0];
833
+ const size_t n1 = tensor->nb[1];
834
+
835
+ char * const data = tensor->data;
836
+
837
+ switch (tensor->type) {
838
+ case GGML_TYPE_I8:
839
+ {
840
+ assert(tensor->nb[0] == sizeof(int8_t));
841
+ for (int i = 0; i < n; i++) {
842
+ ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
843
+ }
844
+ } break;
845
+ case GGML_TYPE_I16:
846
+ {
847
+ assert(tensor->nb[0] == sizeof(int16_t));
848
+ for (int i = 0; i < n; i++) {
849
+ ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
850
+ }
851
+ } break;
852
+ case GGML_TYPE_I32:
853
+ {
854
+ assert(tensor->nb[0] == sizeof(int32_t));
855
+ for (int i = 0; i < n; i++) {
856
+ ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
857
+ }
858
+ } break;
859
+ case GGML_TYPE_F16:
860
+ {
861
+ assert(tensor->nb[0] == sizeof(ggml_fp16_t));
862
+ for (int i = 0; i < n; i++) {
863
+ ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value));
864
+ }
865
+ } break;
866
+ case GGML_TYPE_BF16:
867
+ {
868
+ assert(tensor->nb[0] == sizeof(ggml_bf16_t));
869
+ for (int i = 0; i < n; i++) {
870
+ ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value));
871
+ }
872
+ } break;
873
+ case GGML_TYPE_F32:
874
+ {
875
+ assert(tensor->nb[0] == sizeof(float));
876
+ for (int i = 0; i < n; i++) {
877
+ ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
878
+ }
879
+ } break;
880
+ default:
881
+ {
882
+ GGML_ABORT("fatal error");
883
+ }
884
+ }
885
+
886
+ return tensor;
887
+ }
888
+
889
+ int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
890
+ if (!ggml_is_contiguous(tensor)) {
891
+ int64_t id[4] = { 0, 0, 0, 0 };
892
+ ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
893
+ return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]);
894
+ }
895
+ switch (tensor->type) {
896
+ case GGML_TYPE_I8:
897
+ {
898
+ GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
899
+ return ((int8_t *)(tensor->data))[i];
900
+ }
901
+ case GGML_TYPE_I16:
902
+ {
903
+ GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
904
+ return ((int16_t *)(tensor->data))[i];
905
+ }
906
+ case GGML_TYPE_I32:
907
+ {
908
+ GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
909
+ return ((int32_t *)(tensor->data))[i];
910
+ }
911
+ case GGML_TYPE_F16:
912
+ {
913
+ GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
914
+ return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
915
+ }
916
+ case GGML_TYPE_BF16:
917
+ {
918
+ GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
919
+ return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]);
920
+ }
921
+ case GGML_TYPE_F32:
922
+ {
923
+ GGML_ASSERT(tensor->nb[0] == sizeof(float));
924
+ return ((float *)(tensor->data))[i];
925
+ }
926
+ default:
927
+ {
928
+ GGML_ABORT("fatal error");
929
+ }
930
+ }
931
+ }
932
+
933
+ void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
934
+ if (!ggml_is_contiguous(tensor)) {
935
+ int64_t id[4] = { 0, 0, 0, 0 };
936
+ ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
937
+ ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value);
938
+ return;
939
+ }
940
+ switch (tensor->type) {
941
+ case GGML_TYPE_I8:
942
+ {
943
+ GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
944
+ ((int8_t *)(tensor->data))[i] = value;
945
+ } break;
946
+ case GGML_TYPE_I16:
947
+ {
948
+ GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
949
+ ((int16_t *)(tensor->data))[i] = value;
950
+ } break;
951
+ case GGML_TYPE_I32:
952
+ {
953
+ GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
954
+ ((int32_t *)(tensor->data))[i] = value;
955
+ } break;
956
+ case GGML_TYPE_F16:
957
+ {
958
+ GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
959
+ ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
960
+ } break;
961
+ case GGML_TYPE_BF16:
962
+ {
963
+ GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
964
+ ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value);
965
+ } break;
966
+ case GGML_TYPE_F32:
967
+ {
968
+ GGML_ASSERT(tensor->nb[0] == sizeof(float));
969
+ ((float *)(tensor->data))[i] = value;
970
+ } break;
971
+ default:
972
+ {
973
+ GGML_ABORT("fatal error");
974
+ }
975
+ }
976
+ }
977
+
978
+ int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
979
+ void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
980
+ switch (tensor->type) {
981
+ case GGML_TYPE_I8:
982
+ return ((int8_t *) data)[0];
983
+ case GGML_TYPE_I16:
984
+ return ((int16_t *) data)[0];
985
+ case GGML_TYPE_I32:
986
+ return ((int32_t *) data)[0];
987
+ case GGML_TYPE_F16:
988
+ return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
989
+ case GGML_TYPE_BF16:
990
+ return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]);
991
+ case GGML_TYPE_F32:
992
+ return ((float *) data)[0];
993
+ default:
994
+ GGML_ABORT("fatal error");
995
+ }
996
+ }
997
+
998
+ void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) {
999
+ void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
1000
+ switch (tensor->type) {
1001
+ case GGML_TYPE_I8:
1002
+ {
1003
+ ((int8_t *)(data))[0] = value;
1004
+ } break;
1005
+ case GGML_TYPE_I16:
1006
+ {
1007
+ ((int16_t *)(data))[0] = value;
1008
+ } break;
1009
+ case GGML_TYPE_I32:
1010
+ {
1011
+ ((int32_t *)(data))[0] = value;
1012
+ } break;
1013
+ case GGML_TYPE_F16:
1014
+ {
1015
+ ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
1016
+ } break;
1017
+ case GGML_TYPE_BF16:
1018
+ {
1019
+ ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value);
1020
+ } break;
1021
+ case GGML_TYPE_F32:
1022
+ {
1023
+ ((float *)(data))[0] = value;
1024
+ } break;
1025
+ default:
1026
+ {
1027
+ GGML_ABORT("fatal error");
1028
+ }
1029
+ }
1030
+ }
1031
+
1032
+ float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
1033
+ if (!ggml_is_contiguous(tensor)) {
1034
+ int64_t id[4] = { 0, 0, 0, 0 };
1035
+ ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
1036
+ return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]);
1037
+ }
1038
+ switch (tensor->type) {
1039
+ case GGML_TYPE_I8:
1040
+ {
1041
+ return ((int8_t *)(tensor->data))[i];
1042
+ }
1043
+ case GGML_TYPE_I16:
1044
+ {
1045
+ return ((int16_t *)(tensor->data))[i];
1046
+ }
1047
+ case GGML_TYPE_I32:
1048
+ {
1049
+ return ((int32_t *)(tensor->data))[i];
1050
+ }
1051
+ case GGML_TYPE_F16:
1052
+ {
1053
+ return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
1054
+ }
1055
+ case GGML_TYPE_BF16:
1056
+ {
1057
+ return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]);
1058
+ }
1059
+ case GGML_TYPE_F32:
1060
+ {
1061
+ return ((float *)(tensor->data))[i];
1062
+ }
1063
+ default:
1064
+ {
1065
+ GGML_ABORT("fatal error");
1066
+ }
1067
+ }
1068
+ }
1069
+
1070
+ void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
1071
+ if (!ggml_is_contiguous(tensor)) {
1072
+ int64_t id[4] = { 0, 0, 0, 0 };
1073
+ ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
1074
+ ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value);
1075
+ return;
1076
+ }
1077
+ switch (tensor->type) {
1078
+ case GGML_TYPE_I8:
1079
+ {
1080
+ ((int8_t *)(tensor->data))[i] = value;
1081
+ } break;
1082
+ case GGML_TYPE_I16:
1083
+ {
1084
+ ((int16_t *)(tensor->data))[i] = value;
1085
+ } break;
1086
+ case GGML_TYPE_I32:
1087
+ {
1088
+ ((int32_t *)(tensor->data))[i] = value;
1089
+ } break;
1090
+ case GGML_TYPE_F16:
1091
+ {
1092
+ ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
1093
+ } break;
1094
+ case GGML_TYPE_BF16:
1095
+ {
1096
+ ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value);
1097
+ } break;
1098
+ case GGML_TYPE_F32:
1099
+ {
1100
+ ((float *)(tensor->data))[i] = value;
1101
+ } break;
1102
+ default:
1103
+ {
1104
+ GGML_ABORT("fatal error");
1105
+ }
1106
+ }
1107
+ }
1108
+
1109
+ float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
1110
+ void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
1111
+ switch (tensor->type) {
1112
+ case GGML_TYPE_I8:
1113
+ return ((int8_t *) data)[0];
1114
+ case GGML_TYPE_I16:
1115
+ return ((int16_t *) data)[0];
1116
+ case GGML_TYPE_I32:
1117
+ return ((int32_t *) data)[0];
1118
+ case GGML_TYPE_F16:
1119
+ return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
1120
+ case GGML_TYPE_BF16:
1121
+ return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]);
1122
+ case GGML_TYPE_F32:
1123
+ return ((float *) data)[0];
1124
+ default:
1125
+ GGML_ABORT("fatal error");
1126
+ }
1127
+ }
1128
+
1129
+ void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) {
1130
+ void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
1131
+ switch (tensor->type) {
1132
+ case GGML_TYPE_I8:
1133
+ {
1134
+ ((int8_t *)(data))[0] = value;
1135
+ } break;
1136
+ case GGML_TYPE_I16:
1137
+ {
1138
+ ((int16_t *)(data))[0] = value;
1139
+ } break;
1140
+ case GGML_TYPE_I32:
1141
+ {
1142
+ ((int32_t *)(data))[0] = value;
1143
+ } break;
1144
+ case GGML_TYPE_F16:
1145
+ {
1146
+ ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
1147
+ } break;
1148
+ case GGML_TYPE_BF16:
1149
+ {
1150
+ ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value);
1151
+ } break;
1152
+ case GGML_TYPE_F32:
1153
+ {
1154
+ ((float *)(data))[0] = value;
1155
+ } break;
1156
+ default:
1157
+ {
1158
+ GGML_ABORT("fatal error");
1159
+ }
1160
+ }
1161
+ }
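All of the _nd accessors above share one addressing rule: the byte offset of element (i0, i1, i2, i3) is the dot product of the indices with the per-dimension byte strides nb[]. A tiny self-check with made-up strides for a contiguous f32 tensor of shape ne = {4, 3, 1, 1}:

    #include <assert.h>
    #include <stddef.h>

    int main(void) {
        const size_t nb[4] = { 4, 16, 48, 48 }; // f32: nb[0]=4 bytes, nb[1]=ne[0]*nb[0], ...
        const size_t off = 2*nb[0] + 1*nb[1] + 0*nb[2] + 0*nb[3]; // element (2,1,0,0)
        assert(off == 24); // = (1*4 + 2) floats * 4 bytes in row-major layout
        return 0;
    }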
1162
+
1163
+ ////////////////////////////////////////////////////////////////////////////////
1164
+
1165
+ // ggml_compute_forward_mul_mat
1166
+
1167
+ static void ggml_compute_forward_mul_mat_one_chunk(
1168
+ const struct ggml_compute_params * params,
1169
+ struct ggml_tensor * dst,
1170
+ const enum ggml_type type,
1171
+ const int64_t num_rows_per_vec_dot,
1172
+ const int64_t ir0_start,
1173
+ const int64_t ir0_end,
1174
+ const int64_t ir1_start,
1175
+ const int64_t ir1_end) {
1176
+
1177
+ const struct ggml_tensor * src0 = dst->src[0];
1178
+ const struct ggml_tensor * src1 = dst->src[1];
1179
+
1180
+ GGML_TENSOR_BINARY_OP_LOCALS
1181
+
1182
+ const bool src1_cont = ggml_is_contiguous(src1);
1183
+
1184
+ ggml_vec_dot_t const vec_dot = type_traits_cpu[type].vec_dot;
1185
+ enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type;
1186
+
1187
+ // broadcast factors
1188
+ const int64_t r2 = ne12 / ne02;
1189
+ const int64_t r3 = ne13 / ne03;
1190
+
1191
+ //printf("ir0_start = %6lld, ir0_end = %6lld, ir1_start = %6lld, ir1_end = %6lld\n", ir0_start, ir0_end, ir1_start, ir1_end);
1192
+
1193
+ // threads with no work simply yield (not sure if it helps)
1194
+ if (ir0_start >= ir0_end || ir1_start >= ir1_end) {
1195
+ return;
1196
+ }
1197
+
1198
+ const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
1199
+ const size_t row_size = ggml_row_size(vec_dot_type, ne10);
1200
+
1201
+ assert(ne12 % ne02 == 0);
1202
+ assert(ne13 % ne03 == 0);
1203
+
1204
+ // block-tiling attempt
1205
+ const int64_t blck_0 = 16;
1206
+ const int64_t blck_1 = 16;
1207
+
1208
+ const size_t src1_col_stride = src1_cont || src1->type != vec_dot_type ? row_size : nb11;
1209
+
1210
+ // attempt to reduce false-sharing (does not seem to make a difference)
1211
+ // 16 * 2, accounting for mmla kernels
1212
+ float tmp[32];
1213
+
1214
+ for (int64_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) {
1215
+ for (int64_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) {
1216
+ for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ir1 += num_rows_per_vec_dot) {
1217
+ const int64_t i13 = (ir1 / (ne12 * ne1));
1218
+ const int64_t i12 = (ir1 - i13 * ne12 * ne1) / ne1;
1219
+ const int64_t i11 = (ir1 - i13 * ne12 * ne1 - i12 * ne1);
1220
+
1221
+ // broadcast src0 into src1
1222
+ const int64_t i03 = i13 / r3;
1223
+ const int64_t i02 = i12 / r2;
1224
+
1225
+ const int64_t i1 = i11;
1226
+ const int64_t i2 = i12;
1227
+ const int64_t i3 = i13;
1228
+
1229
+ const char * src0_row = (const char*)src0->data + (0 + i02 * nb02 + i03 * nb03);
1230
+
1231
+ // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
1232
+ // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
1233
+ // the original src1 data pointer, so we should index using the indices directly
1234
+ // TODO: this is a bit of a hack, we should probably have a better way to handle this
1235
+ const char * src1_col = (const char*)wdata +
1236
+ (src1_cont || src1->type != vec_dot_type
1237
+ ? (i11 + i12 * ne11 + i13 * ne12 * ne11) * row_size
1238
+ : (i11 * nb11 + i12 * nb12 + i13 * nb13));
1239
+ float * dst_col = (float*)((char*)dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3));
1240
+
1241
+ //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ++ir0) {
1242
+ // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
1243
+ //}
1244
+
1245
+ for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ir0 += num_rows_per_vec_dot) {
1246
+ vec_dot(ne00, &tmp[ir0 - iir0], (num_rows_per_vec_dot > 1 ? 16 : 0), src0_row + ir0 * nb01, (num_rows_per_vec_dot > 1 ? nb01 : 0), src1_col, (num_rows_per_vec_dot > 1 ? src1_col_stride : 0), num_rows_per_vec_dot);
1247
+ }
1248
+
1249
+ for (int cn = 0; cn < num_rows_per_vec_dot; ++cn) {
1250
+ memcpy(&dst_col[iir0 + cn * nb1 / nb0], tmp + (cn * 16), (MIN(iir0 + blck_0, ir0_end) - iir0) * sizeof(float));
1251
+ }
1252
+ }
1253
+ }
1254
+ }
1255
+ }
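The i13/i12/i11 computation above unflattens a row id ir1 over the (ne1, ne12, ne13) grid by repeated division. A tiny standalone check of that decomposition, with made-up sizes:

    #include <assert.h>

    int main(void) {
        const long ne1 = 4, ne12 = 3, ne13 = 2; // made-up dims
        for (long ir1 = 0; ir1 < ne1*ne12*ne13; ++ir1) {
            const long i13 = ir1 / (ne12*ne1);
            const long i12 = (ir1 - i13*ne12*ne1) / ne1;
            const long i11 = ir1 - i13*ne12*ne1 - i12*ne1;
            assert(i11 >= 0 && i11 < ne1);   // each coordinate lands in range
            assert(i12 >= 0 && i12 < ne12);
            assert(i11 + ne1*(i12 + ne12*i13) == ir1); // and round-trips back
        }
        return 0;
    }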
1256
+
1257
+ static void ggml_compute_forward_mul_mat(
1258
+ const struct ggml_compute_params * params,
1259
+ struct ggml_tensor * dst) {
1260
+
1261
+ const struct ggml_tensor * src0 = dst->src[0];
1262
+ const struct ggml_tensor * src1 = dst->src[1];
1263
+
1264
+ GGML_TENSOR_BINARY_OP_LOCALS
1265
+
1266
+ const int ith = params->ith;
1267
+ const int nth = params->nth;
1268
+
1269
+ enum ggml_type const vec_dot_type = type_traits_cpu[src0->type].vec_dot_type;
1270
+ ggml_from_float_t const from_float = type_traits_cpu[vec_dot_type].from_float;
1271
+ int64_t const vec_dot_num_rows = type_traits_cpu[src0->type].nrows;
1272
+
1273
+ GGML_ASSERT(ne0 == ne01);
1274
+ GGML_ASSERT(ne1 == ne11);
1275
+ GGML_ASSERT(ne2 == ne12);
1276
+ GGML_ASSERT(ne3 == ne13);
1277
+
1278
+ // we don't support permuted src0 or src1
1279
+ GGML_ASSERT(nb00 == ggml_type_size(src0->type));
1280
+ GGML_ASSERT(nb10 == ggml_type_size(src1->type));
1281
+
1282
+ // dst cannot be transposed or permuted
1283
+ GGML_ASSERT(nb0 == sizeof(float));
1284
+ GGML_ASSERT(nb0 <= nb1);
1285
+ GGML_ASSERT(nb1 <= nb2);
1286
+ GGML_ASSERT(nb2 <= nb3);
1287
+
1288
+ // nb01 >= nb00 - src0 is not transposed
1289
+ // compute by src0 rows
1290
+
1291
+ // TODO: extract to "extra_op"
1292
+ #if GGML_USE_LLAMAFILE
1293
+ // broadcast factors
1294
+ const int64_t r2 = ne12 / ne02;
1295
+ const int64_t r3 = ne13 / ne03;
1296
+
1297
+ const bool src1_cont = ggml_is_contiguous(src1);
1298
+
1299
+ if (src1_cont) {
1300
+ for (int64_t i13 = 0; i13 < ne13; i13++)
1301
+ for (int64_t i12 = 0; i12 < ne12; i12++)
1302
+ if (!llamafile_sgemm(params,
1303
+ ne01, ne11, ne00/ggml_blck_size(src0->type),
1304
+ (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03,
1305
+ nb01/ggml_type_size(src0->type),
1306
+ (const char *)src1->data + i12*nb12 + i13*nb13,
1307
+ nb11/ggml_type_size(src1->type),
1308
+ (char *)dst->data + i12*nb2 + i13*nb3,
1309
+ nb1/ggml_type_size(dst->type),
1310
+ src0->type,
1311
+ src1->type,
1312
+ dst->type))
1313
+ goto UseGgmlGemm1;
1314
+ return;
1315
+ }
1316
+ UseGgmlGemm1:;
1317
+ #endif
1318
+
1319
+ if (src1->type != vec_dot_type) {
1320
+ char * wdata = params->wdata;
1321
+
1322
+ const size_t nbw0 = ggml_type_size(vec_dot_type);
1323
+ const size_t nbw1 = ggml_row_size(vec_dot_type, ne10);
1324
+ const size_t nbw2 = nbw1*ne11;
1325
+ const size_t nbw3 = nbw2*ne12;
1326
+
1327
+ assert(params->wsize >= ne13*nbw3);
1328
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
1329
+
1330
+ #if 0
1331
+ for (int64_t i13 = 0; i13 < ne13; ++i13) {
1332
+ for (int64_t i12 = 0; i12 < ne12; ++i12) {
1333
+ for (int64_t i11 = ith; i11 < ne11; i11 += nth) {
1334
+ from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11),
1335
+ (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1),
1336
+ ne10);
1337
+ }
1338
+ }
1339
+ }
1340
+ #else
1341
+ for (int64_t i13 = 0; i13 < ne13; ++i13) {
1342
+ for (int64_t i12 = 0; i12 < ne12; ++i12) {
1343
+ for (int64_t i11 = 0; i11 < ne11; ++i11) {
1344
+ size_t bs = ggml_blck_size(vec_dot_type);
1345
+ int64_t ne10_block_start = (ith * ne10/bs) / nth;
1346
+ int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth;
1347
+ from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10),
1348
+ (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0),
1349
+ (ne10_block_end - ne10_block_start) * bs);
1350
+ }
1351
+ }
1352
+ }
1353
+ #endif
1354
+ }
1355
+
1356
+ if (ith == 0) {
1357
+ // Every thread starts at ith, so the first unprocessed chunk is nth. This saves a bit of coordination right at the start.
1358
+ atomic_store_explicit(&params->threadpool->current_chunk, nth, memory_order_relaxed);
1359
+ }
1360
+
1361
+ ggml_barrier(params->threadpool);
1362
+
1363
+ #if GGML_USE_LLAMAFILE
1364
+ if (src1->type != vec_dot_type) {
1365
+ const void* wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
1366
+ const size_t row_size = ggml_row_size(vec_dot_type, ne10);
1367
+
1368
+ for (int64_t i13 = 0; i13 < ne13; i13++)
1369
+ for (int64_t i12 = 0; i12 < ne12; i12++)
1370
+ if (!llamafile_sgemm(params,
1371
+ ne01, ne11, ne00/ggml_blck_size(src0->type),
1372
+ (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03,
1373
+ nb01/ggml_type_size(src0->type),
1374
+ (const char *)wdata + (i12*ne11 + i13*ne12*ne11)*row_size,
1375
+ row_size/ggml_type_size(vec_dot_type),
1376
+ (char *)dst->data + i12*nb2 + i13*nb3,
1377
+ nb1/ggml_type_size(dst->type),
1378
+ src0->type,
1379
+ vec_dot_type,
1380
+ dst->type))
1381
+ goto UseGgmlGemm2;
1382
+ return;
1383
+ }
1384
+ UseGgmlGemm2:;
1385
+ #endif
1386
+
1387
+ // This is the size of the first dimension of the result, so we can iterate that way. (see the ASSERT above, these are the same numbers)
1388
+ const int64_t nr0 = ne0;
1389
+
1390
+ // This is the size of the rest of the dimensions of the result
1391
+ const int64_t nr1 = ne1 * ne2 * ne3;
1392
+
1393
+ // Now select a reasonable chunk size.
1394
+ int chunk_size = 16;
1395
+
1396
+ // We need to step up the size if it's small
1397
+ if (nr0 == 1 || nr1 == 1) {
1398
+ chunk_size = 64;
1399
+ }
1400
+
1401
+ // distribute the work across the inner or outer loop based on which one is larger
1402
+ // The number of chunks in the 0/1 dim.
1403
+ // CEIL(nr0/chunk_size)
1404
+ int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size;
1405
+ int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size;
1406
+
1407
+ // If the chunking is poor for the number of threads on this setup, scrap the whole plan. Re-chunk it by thread.
1408
+ // Also, chunking by thread was measured to perform better on NUMA systems. See https://github.com/ggml-org/llama.cpp/pull/6915
1409
+ // In theory, chunking should be just as useful on NUMA and non-NUMA systems, but testing disagreed with that.
1410
+ if (nchunk0 * nchunk1 < nth * 4 || ggml_is_numa()) {
1411
+ // distribute the thread work across the inner or outer loop based on which one is larger
1412
+ nchunk0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
1413
+ nchunk1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
1414
+ }
1415
+
1416
+ // The number of elements in each chunk
1417
+ const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0;
1418
+ const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1;
1419
+
1420
+ // The first chunk comes from our thread_id, the rest will get auto-assigned.
1421
+ int current_chunk = ith;
1422
+
1423
+ while (current_chunk < nchunk0 * nchunk1) {
1424
+ const int64_t ith0 = current_chunk % nchunk0;
1425
+ const int64_t ith1 = current_chunk / nchunk0;
1426
+
1427
+ const int64_t ir0_start = dr0 * ith0;
1428
+ const int64_t ir0_end = MIN(ir0_start + dr0, nr0);
1429
+
1430
+ const int64_t ir1_start = dr1 * ith1;
1431
+ const int64_t ir1_end = MIN(ir1_start + dr1, nr1);
1432
+
1433
+ // dot kernels can handle 1 row and col at a time, but mmla kernels can process 2 rows and cols
1434
+ int64_t num_rows_per_vec_dot = vec_dot_num_rows;
1435
+
1436
+ // these checks are needed to avoid crossing dim1 boundaries
1437
+ // can be optimized, but the logic would become more complicated, so keeping it like this for simplicity
1438
+ if ((nr0 % 2 != 0) || (ne11 % 2 != 0) || ((ir0_end - ir0_start) % 2 != 0) || ((ir1_end - ir1_start) % 2 != 0)) {
1439
+ num_rows_per_vec_dot = 1;
1440
+ }
1441
+ ggml_compute_forward_mul_mat_one_chunk(params, dst, src0->type, num_rows_per_vec_dot, ir0_start, ir0_end, ir1_start, ir1_end);
1442
+
1443
+ if (nth >= nchunk0 * nchunk1) {
1444
+ break;
1445
+ }
1446
+
1447
+ current_chunk = atomic_fetch_add_explicit(&params->threadpool->current_chunk, 1, memory_order_relaxed);
1448
+ }
1449
+ }
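The loop above is a simple self-scheduling scheme: thread i starts on chunk i, and because thread 0 pre-seeded current_chunk to nth before the barrier, every later chunk is claimed exactly once via an atomic increment. The skeleton, reduced to its essentials (process_chunk is a hypothetical stand-in for the per-chunk kernel):

    #include <stdatomic.h>

    extern void process_chunk(int chunk); // hypothetical per-chunk work

    // next_chunk must be initialized to nth before the workers start,
    // since chunks 0..nth-1 are taken implicitly via the thread ids.
    static void worker(atomic_int * next_chunk, int ith, int nth, int nchunks) {
        int chunk = ith; // first chunk is implied by the thread id ...
        while (chunk < nchunks) {
            process_chunk(chunk);
            if (nth >= nchunks) {
                break; // at most one chunk per thread; nothing left to claim
            }
            // ... the rest are handed out first-come, first-served
            chunk = atomic_fetch_add_explicit(next_chunk, 1, memory_order_relaxed);
        }
    }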
1450
+
1451
+ // ggml_compute_forward_mul_mat_id
1452
+
1453
+ #define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ids->ne[0]*ids->ne[1] + (i1)]
1454
+
1455
+ struct mmid_row_mapping {
1456
+ int32_t i1;
1457
+ int32_t i2;
1458
+ };
1459
+
1460
+ static void ggml_compute_forward_mul_mat_id_one_chunk(
1461
+ struct ggml_tensor * dst,
1462
+ const struct ggml_tensor * src0,
1463
+ const struct ggml_tensor * src1,
1464
+ const struct ggml_tensor * ids,
1465
+ const int64_t cur_a,
1466
+ const int64_t ir0_start,
1467
+ const int64_t ir0_end,
1468
+ const int64_t ir1_start,
1469
+ const int64_t ir1_end,
1470
+ const char * src0_cur,
1471
+ const struct mmid_row_mapping * matrix_rows,
1472
+ const size_t row_size,
1473
+ const bool src1_cont,
1474
+ const void * wdata) {
1475
+
1476
+ GGML_TENSOR_BINARY_OP_LOCALS
1477
+
1478
+ const enum ggml_type type = src0->type;
1479
+
1480
+ ggml_vec_dot_t const vec_dot = type_traits_cpu[type].vec_dot;
1481
+ enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type;
1482
+
1483
+ const int64_t blck_0 = 16;
1484
+ const int64_t blck_1 = 16;
1485
+
1486
+ float tmp[16];
1487
+
1488
+ for (int64_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) {
1489
+ for (int64_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) {
1490
+ for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ++ir1) {
1491
+ const int64_t _i12 = ir1; // logical row index for this expert
1492
+
1493
+ struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, _i12);
1494
+ const int id = row_mapping.i1; // selected expert index
1495
+
1496
+ const int64_t i11 = id % ne11;
1497
+ const int64_t i12 = row_mapping.i2; // row index in src1
1498
+
1499
+ const int64_t i1 = id; // selected expert index
1500
+ const int64_t i2 = i12; // row
1501
+
1502
+ // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
1503
+ // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
1504
+ // the original src1 data pointer, so we should index using the indices directly
1505
+ // TODO: this is a bit of a hack, we should probably have a better way to handle this
1506
+ const char * src1_col = (const char *) wdata +
1507
+ (src1_cont || src1->type != vec_dot_type
1508
+ ? (i11 + i12*ne11)*row_size
1509
+ : (i11*nb11 + i12*nb12));
1510
+
1511
+ float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2));
1512
+
1513
+ for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ++ir0) {
1514
+ vec_dot(ne00, &tmp[ir0 - iir0], 0, src0_cur + ir0*nb01, 0, src1_col, 0, 1);
1515
+ }
1516
+
1517
+ memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir0_end) - iir0)*sizeof(float));
1518
+ }
1519
+ }
1520
+ }
1521
+ }
1522
+
1523
+ static void * incr_ptr_aligned(void ** p, size_t size, size_t align) {
1524
+
1525
+ void * ptr = *p;
1526
+ ptr = (void *) GGML_PAD((uintptr_t) ptr, align);
1527
+ *p = (void *) ((char *) ptr + size);
1528
+ return ptr;
1529
+ }
1530
+
1531
+ static void ggml_compute_forward_mul_mat_id(
1532
+ const struct ggml_compute_params * params,
1533
+ struct ggml_tensor * dst) {
1534
+
1535
+ const struct ggml_tensor * src0 = dst->src[0];
1536
+ const struct ggml_tensor * src1 = dst->src[1];
1537
+ const struct ggml_tensor * ids = dst->src[2];
1538
+
1539
+ GGML_TENSOR_BINARY_OP_LOCALS
1540
+
1541
+ const int ith = params->ith;
1542
+ const int nth = params->nth;
1543
+
1544
+ const enum ggml_type type = src0->type;
1545
+
1546
+ const bool src1_cont = ggml_is_contiguous(src1);
1547
+
1548
+ enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type;
1549
+ ggml_from_float_t const from_float = type_traits_cpu[vec_dot_type].from_float;
1550
+
1551
+ // we don't support permuted src0 or src1
1552
+ GGML_ASSERT(nb00 == ggml_type_size(type));
1553
+ GGML_ASSERT(nb10 == ggml_type_size(src1->type));
1554
+
1555
+ // dst cannot be transposed or permuted
1556
+ GGML_ASSERT(nb0 == sizeof(float));
1557
+ GGML_ASSERT(nb0 <= nb1);
1558
+ GGML_ASSERT(nb1 <= nb2);
1559
+ GGML_ASSERT(nb2 <= nb3);
1560
+
1561
+ // row groups
1562
+ const int n_ids = ids->ne[0]; // n_expert_used
1563
+ const int n_as = ne02; // n_expert
1564
+
1565
+ void * wdata_cur = params->wdata;
1566
+
1567
+ if (src1->type != vec_dot_type) {
1568
+ incr_ptr_aligned(&wdata_cur, ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t));
1569
+ }
1570
+
1571
+ int64_t * matrix_row_counts = // [n_as]
1572
+ incr_ptr_aligned(&wdata_cur, n_as*sizeof(int64_t), sizeof(int64_t));
1573
+
1574
+ struct mmid_row_mapping * matrix_rows = // [n_as][ids->ne[0]*ids->ne[1]]
1575
+ incr_ptr_aligned(&wdata_cur, n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping), sizeof(int64_t));
1576
+
1577
+ char (*atomic_current_chunk)[CACHE_LINE_SIZE] = // [n_as]
1578
+ incr_ptr_aligned(&wdata_cur, CACHE_LINE_SIZE * n_as, CACHE_LINE_SIZE);
1579
+
1580
+ GGML_ASSERT(params->wsize >= (size_t)((char *) wdata_cur - (char *) params->wdata));
1581
+
1582
+ if (src1->type != vec_dot_type) {
1583
+ char * wdata = params->wdata;
1584
+
1585
+ const size_t nbw0 = ggml_type_size(vec_dot_type);
1586
+ const size_t nbw1 = ggml_row_size(vec_dot_type, ne10);
1587
+ const size_t nbw2 = nbw1*ne11;
1588
+ const size_t nbw3 = nbw2*ne12;
1589
+
1590
+ assert(params->wsize >= ne13*nbw3);
1591
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
1592
+
1593
+ #if 0
1594
+ for (int64_t i13 = 0; i13 < ne13; ++i13) {
1595
+ for (int64_t i12 = ith; i12 < ne12; i12 += nth) {
1596
+ for (int64_t i11 = 0; i11 < ne11; ++i11) {
1597
+ from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11),
1598
+ (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1),
1599
+ ne10);
1600
+ }
1601
+ }
1602
+ }
1603
+ #else
1604
+ for (int64_t i13 = 0; i13 < ne13; ++i13) {
1605
+ for (int64_t i12 = 0; i12 < ne12; ++i12) {
1606
+ for (int64_t i11 = 0; i11 < ne11; ++i11) {
1607
+ size_t bs = ggml_blck_size(vec_dot_type);
1608
+ int64_t ne10_block_start = (ith * ne10/bs) / nth;
1609
+ int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth;
1610
+ from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10),
1611
+ (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0),
1612
+ (ne10_block_end - ne10_block_start) * bs);
1613
+ }
1614
+ }
1615
+ }
1616
+ #endif
1617
+ }
1618
+
1619
+ if (ith == 0) {
1620
+ // initialize matrix_row_counts
1621
+ memset(matrix_row_counts, 0, n_as*sizeof(int64_t));
1622
+
1623
+ // group rows by src0 matrix
1624
+ for (int64_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) {
1625
+ for (int id = 0; id < n_ids; ++id) {
1626
+ const int32_t i02 = *(const int32_t *) ((const char *) ids->data + iid1*ids->nb[1] + id*ids->nb[0]);
1627
+
1628
+ assert(i02 >= 0 && i02 < n_as);
1629
+
1630
+ MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = (struct mmid_row_mapping) {id, iid1};
1631
+ matrix_row_counts[i02] += 1;
1632
+ }
1633
+ }
1634
+ }
1635
+
1636
+ // reset current_chunk
1637
+ for (int cur_a = ith; cur_a < n_as; cur_a += nth) {
1638
+ atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a);
1639
+ *current_chunk_ctr = nth;
1640
+ }
1641
+
1642
+ ggml_barrier(params->threadpool);
1643
+
1644
+ for (int cur_a = 0; cur_a < n_as; ++cur_a) {
1645
+ const int64_t cne1 = matrix_row_counts[cur_a];
1646
+
1647
+ if (cne1 == 0) {
1648
+ continue;
1649
+ }
1650
+
1651
+ const char * src0_cur = (const char *) src0->data + cur_a * nb02;
1652
+ const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
1653
+ const size_t row_size = ggml_row_size(vec_dot_type, ne10);
1654
+
1655
+ const int64_t nr0 = ne01;
1656
+ const int64_t nr1 = cne1;
1657
+
1658
+ int chunk_size = 16;
1659
+ if (nr0 == 1 || nr1 == 1) {
1660
+ chunk_size = 64;
1661
+ }
1662
+
1663
+ #if defined(__aarch64__)
1664
+ // disable for ARM
1665
+ const bool disable_chunking = true;
1666
+ #else
1667
+ // disable for NUMA
1668
+ const bool disable_chunking = ggml_is_numa();
1669
+ #endif // defined(__aarch64__)
1670
+
1671
+ int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size;
1672
+ int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size;
1673
+
1674
+ if (nchunk0 * nchunk1 < nth * 4 || disable_chunking) {
1675
+ nchunk0 = nr0 > nr1 ? nth : 1;
1676
+ nchunk1 = nr0 > nr1 ? 1 : nth;
1677
+ }
1678
+
1679
+ const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0;
1680
+ const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1;
1681
+
1682
+ int current_chunk = ith;
1683
+
1684
+ atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a);
1685
+
1686
+ while (current_chunk < nchunk0 * nchunk1) {
1687
+ const int64_t ith0 = current_chunk % nchunk0;
1688
+ const int64_t ith1 = current_chunk / nchunk0;
1689
+
1690
+ const int64_t ir0_start = dr0 * ith0;
1691
+ const int64_t ir0_end = MIN(ir0_start + dr0, nr0);
1692
+
1693
+ const int64_t ir1_start = dr1 * ith1;
1694
+ const int64_t ir1_end = MIN(ir1_start + dr1, nr1);
1695
+
1696
+ ggml_compute_forward_mul_mat_id_one_chunk(
1697
+ dst, src0, src1, ids, cur_a,
1698
+ ir0_start, ir0_end, ir1_start, ir1_end,
1699
+ src0_cur, matrix_rows, row_size, src1_cont, wdata
1700
+ );
1701
+
1702
+ if (nth >= nchunk0 * nchunk1) {
1703
+ break;
1704
+ }
1705
+
1706
+ current_chunk = atomic_fetch_add_explicit(current_chunk_ctr, 1, memory_order_relaxed);
1707
+ }
1708
+ }
1709
+ }
1710
+
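+ // [editor's sketch, not part of this diff] The chunked scheduler above
+ // linearizes a 2D grid of (nchunk0 x nchunk1) tiles; each thread claims the
+ // next linear index from an atomic counter and decodes it into row ranges.
+ // A minimal standalone demo of that index math:
+ #include <stdio.h>
+ #define MIN(a, b) ((a) < (b) ? (a) : (b))
+ int main(void) {
+     const long nr0 = 40, nr1 = 20;              // dst rows per dimension
+     const long dr0 = 16, dr1 = 16;              // chunk (tile) sizes
+     const long nchunk0 = (nr0 + dr0 - 1) / dr0; // = 3
+     const long nchunk1 = (nr1 + dr1 - 1) / dr1; // = 2
+     for (long chunk = 0; chunk < nchunk0 * nchunk1; chunk++) {
+         const long i0 = chunk % nchunk0;        // tile coordinate along dim 0
+         const long i1 = chunk / nchunk0;        // tile coordinate along dim 1
+         printf("chunk %ld -> rows0 [%ld,%ld) rows1 [%ld,%ld)\n", chunk,
+                dr0*i0, MIN(dr0*i0 + dr0, nr0),
+                dr1*i1, MIN(dr1*i1 + dr1, nr1));
+     }
+     return 0;
+ }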
1711
+ /////////////////////////////////
1712
+
1713
+ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
1714
+ GGML_ASSERT(params);
1715
+
1716
+ if (tensor->op == GGML_OP_NONE || ggml_is_empty(tensor)) {
1717
+ return;
1718
+ }
1719
+
1720
+ // extra_buffer op?
1721
+ if (ggml_cpu_extra_compute_forward(params, tensor)) {
1722
+ return;
1723
+ }
1724
+
1725
+ switch (tensor->op) {
1726
+ case GGML_OP_DUP:
1727
+ {
1728
+ ggml_compute_forward_dup(params, tensor);
1729
+ } break;
1730
+ case GGML_OP_ADD:
1731
+ {
1732
+ ggml_compute_forward_add(params, tensor);
1733
+ } break;
1734
+ case GGML_OP_ADD1:
1735
+ {
1736
+ ggml_compute_forward_add1(params, tensor);
1737
+ } break;
1738
+ case GGML_OP_ACC:
1739
+ {
1740
+ ggml_compute_forward_acc(params, tensor);
1741
+ } break;
1742
+ case GGML_OP_SUB:
1743
+ {
1744
+ ggml_compute_forward_sub(params, tensor);
1745
+ } break;
1746
+ case GGML_OP_MUL:
1747
+ {
1748
+ ggml_compute_forward_mul(params, tensor);
1749
+ } break;
1750
+ case GGML_OP_DIV:
1751
+ {
1752
+ ggml_compute_forward_div(params, tensor);
1753
+ } break;
1754
+ case GGML_OP_SQR:
1755
+ {
1756
+ ggml_compute_forward_sqr(params, tensor);
1757
+ } break;
1758
+ case GGML_OP_SQRT:
1759
+ {
1760
+ ggml_compute_forward_sqrt(params, tensor);
1761
+ } break;
1762
+ case GGML_OP_LOG:
1763
+ {
1764
+ ggml_compute_forward_log(params, tensor);
1765
+ } break;
1766
+ case GGML_OP_SIN:
1767
+ {
1768
+ ggml_compute_forward_sin(params, tensor);
1769
+ } break;
1770
+ case GGML_OP_COS:
1771
+ {
1772
+ ggml_compute_forward_cos(params, tensor);
1773
+ } break;
1774
+ case GGML_OP_SUM:
1775
+ {
1776
+ ggml_compute_forward_sum(params, tensor);
1777
+ } break;
1778
+ case GGML_OP_SUM_ROWS:
1779
+ {
1780
+ ggml_compute_forward_sum_rows(params, tensor);
1781
+ } break;
1782
+ case GGML_OP_MEAN:
1783
+ {
1784
+ ggml_compute_forward_mean(params, tensor);
1785
+ } break;
1786
+ case GGML_OP_ARGMAX:
1787
+ {
1788
+ ggml_compute_forward_argmax(params, tensor);
1789
+ } break;
1790
+ case GGML_OP_COUNT_EQUAL:
1791
+ {
1792
+ ggml_compute_forward_count_equal(params, tensor);
1793
+ } break;
1794
+ case GGML_OP_REPEAT:
1795
+ {
1796
+ ggml_compute_forward_repeat(params, tensor);
1797
+ } break;
1798
+ case GGML_OP_REPEAT_BACK:
1799
+ {
1800
+ ggml_compute_forward_repeat_back(params, tensor);
1801
+ } break;
1802
+ case GGML_OP_CONCAT:
1803
+ {
1804
+ ggml_compute_forward_concat(params, tensor);
1805
+ } break;
1806
+ case GGML_OP_SILU_BACK:
1807
+ {
1808
+ ggml_compute_forward_silu_back(params, tensor);
1809
+ } break;
1810
+ case GGML_OP_NORM:
1811
+ {
1812
+ ggml_compute_forward_norm(params, tensor);
1813
+ } break;
1814
+ case GGML_OP_RMS_NORM:
1815
+ {
1816
+ ggml_compute_forward_rms_norm(params, tensor);
1817
+ } break;
1818
+ case GGML_OP_RMS_NORM_BACK:
1819
+ {
1820
+ ggml_compute_forward_rms_norm_back(params, tensor);
1821
+ } break;
1822
+ case GGML_OP_GROUP_NORM:
1823
+ {
1824
+ ggml_compute_forward_group_norm(params, tensor);
1825
+ } break;
1826
+ case GGML_OP_L2_NORM:
1827
+ {
1828
+ ggml_compute_forward_l2_norm(params, tensor);
1829
+ } break;
1830
+ case GGML_OP_MUL_MAT:
1831
+ {
1832
+ ggml_compute_forward_mul_mat(params, tensor);
1833
+ } break;
1834
+ case GGML_OP_MUL_MAT_ID:
1835
+ {
1836
+ ggml_compute_forward_mul_mat_id(params, tensor);
1837
+ } break;
1838
+ case GGML_OP_OUT_PROD:
1839
+ {
1840
+ ggml_compute_forward_out_prod(params, tensor);
1841
+ } break;
1842
+ case GGML_OP_SCALE:
1843
+ {
1844
+ ggml_compute_forward_scale(params, tensor);
1845
+ } break;
1846
+ case GGML_OP_SET:
1847
+ {
1848
+ ggml_compute_forward_set(params, tensor);
1849
+ } break;
1850
+ case GGML_OP_CPY:
1851
+ {
1852
+ ggml_compute_forward_cpy(params, tensor);
1853
+ } break;
1854
+ case GGML_OP_CONT:
1855
+ {
1856
+ ggml_compute_forward_cont(params, tensor);
1857
+ } break;
1858
+ case GGML_OP_RESHAPE:
1859
+ {
1860
+ ggml_compute_forward_reshape(params, tensor);
1861
+ } break;
1862
+ case GGML_OP_VIEW:
1863
+ {
1864
+ ggml_compute_forward_view(params, tensor);
1865
+ } break;
1866
+ case GGML_OP_PERMUTE:
1867
+ {
1868
+ ggml_compute_forward_permute(params, tensor);
1869
+ } break;
1870
+ case GGML_OP_TRANSPOSE:
1871
+ {
1872
+ ggml_compute_forward_transpose(params, tensor);
1873
+ } break;
1874
+ case GGML_OP_GET_ROWS:
1875
+ {
1876
+ ggml_compute_forward_get_rows(params, tensor);
1877
+ } break;
1878
+ case GGML_OP_GET_ROWS_BACK:
1879
+ {
1880
+ ggml_compute_forward_get_rows_back(params, tensor);
1881
+ } break;
1882
+ case GGML_OP_DIAG:
1883
+ {
1884
+ ggml_compute_forward_diag(params, tensor);
1885
+ } break;
1886
+ case GGML_OP_DIAG_MASK_INF:
1887
+ {
1888
+ ggml_compute_forward_diag_mask_inf(params, tensor);
1889
+ } break;
1890
+ case GGML_OP_DIAG_MASK_ZERO:
1891
+ {
1892
+ ggml_compute_forward_diag_mask_zero(params, tensor);
1893
+ } break;
1894
+ case GGML_OP_SOFT_MAX:
1895
+ {
1896
+ ggml_compute_forward_soft_max(params, tensor);
1897
+ } break;
1898
+ case GGML_OP_SOFT_MAX_BACK:
1899
+ {
1900
+ ggml_compute_forward_soft_max_ext_back(params, tensor);
1901
+ } break;
1902
+ case GGML_OP_ROPE:
1903
+ {
1904
+ ggml_compute_forward_rope(params, tensor);
1905
+ } break;
1906
+ case GGML_OP_ROPE_BACK:
1907
+ {
1908
+ ggml_compute_forward_rope_back(params, tensor);
1909
+ } break;
1910
+ case GGML_OP_CLAMP:
1911
+ {
1912
+ ggml_compute_forward_clamp(params, tensor);
1913
+ } break;
1914
+ case GGML_OP_CONV_TRANSPOSE_1D:
1915
+ {
1916
+ ggml_compute_forward_conv_transpose_1d(params, tensor);
1917
+ } break;
1918
+ case GGML_OP_IM2COL:
1919
+ {
1920
+ ggml_compute_forward_im2col(params, tensor);
1921
+ } break;
1922
+ case GGML_OP_IM2COL_BACK:
1923
+ {
1924
+ ggml_compute_forward_im2col_back_f32(params, tensor);
1925
+ } break;
1926
+ case GGML_OP_CONV_2D_DW:
1927
+ {
1928
+ ggml_compute_forward_conv_2d_dw(params, tensor);
1929
+ } break;
1930
+ case GGML_OP_CONV_TRANSPOSE_2D:
1931
+ {
1932
+ ggml_compute_forward_conv_transpose_2d(params, tensor);
1933
+ } break;
1934
+ case GGML_OP_POOL_1D:
1935
+ {
1936
+ ggml_compute_forward_pool_1d(params, tensor);
1937
+ } break;
1938
+ case GGML_OP_POOL_2D:
1939
+ {
1940
+ ggml_compute_forward_pool_2d(params, tensor);
1941
+ } break;
1942
+ case GGML_OP_POOL_2D_BACK:
1943
+ {
1944
+ ggml_compute_forward_pool_2d_back(params, tensor);
1945
+ } break;
1946
+ case GGML_OP_UPSCALE:
1947
+ {
1948
+ ggml_compute_forward_upscale(params, tensor);
1949
+ } break;
1950
+ case GGML_OP_PAD:
1951
+ {
1952
+ ggml_compute_forward_pad(params, tensor);
1953
+ } break;
1954
+ case GGML_OP_PAD_REFLECT_1D:
1955
+ {
1956
+ ggml_compute_forward_pad_reflect_1d(params, tensor);
1957
+ } break;
1958
+ case GGML_OP_ARANGE:
1959
+ {
1960
+ ggml_compute_forward_arange(params, tensor);
1961
+ } break;
1962
+ case GGML_OP_TIMESTEP_EMBEDDING:
1963
+ {
1964
+ ggml_compute_forward_timestep_embedding(params, tensor);
1965
+ } break;
1966
+ case GGML_OP_ARGSORT:
1967
+ {
1968
+ ggml_compute_forward_argsort(params, tensor);
1969
+ } break;
1970
+ case GGML_OP_LEAKY_RELU:
1971
+ {
1972
+ ggml_compute_forward_leaky_relu(params, tensor);
1973
+ } break;
1974
+ case GGML_OP_FLASH_ATTN_EXT:
1975
+ {
1976
+ ggml_compute_forward_flash_attn_ext(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor);
1977
+ } break;
1978
+ case GGML_OP_FLASH_ATTN_BACK:
1979
+ {
1980
+ int32_t t = ggml_get_op_params_i32(tensor, 0);
1981
+ GGML_ASSERT(t == 0 || t == 1);
1982
+ bool masked = t != 0;
1983
+ ggml_compute_forward_flash_attn_back(params, masked, tensor);
1984
+ } break;
1985
+ case GGML_OP_SSM_CONV:
1986
+ {
1987
+ ggml_compute_forward_ssm_conv(params, tensor);
1988
+ } break;
1989
+ case GGML_OP_SSM_SCAN:
1990
+ {
1991
+ ggml_compute_forward_ssm_scan(params, tensor);
1992
+ } break;
1993
+ case GGML_OP_WIN_PART:
1994
+ {
1995
+ ggml_compute_forward_win_part(params, tensor);
1996
+ } break;
1997
+ case GGML_OP_WIN_UNPART:
1998
+ {
1999
+ ggml_compute_forward_win_unpart(params, tensor);
2000
+ } break;
2001
+ case GGML_OP_UNARY:
2002
+ {
2003
+ ggml_compute_forward_unary(params, tensor);
2004
+ } break;
2005
+ case GGML_OP_GET_REL_POS:
2006
+ {
2007
+ ggml_compute_forward_get_rel_pos(params, tensor);
2008
+ } break;
2009
+ case GGML_OP_ADD_REL_POS:
2010
+ {
2011
+ ggml_compute_forward_add_rel_pos(params, tensor);
2012
+ } break;
2013
+ case GGML_OP_RWKV_WKV6:
2014
+ {
2015
+ ggml_compute_forward_rwkv_wkv6(params, tensor);
2016
+ } break;
2017
+ case GGML_OP_GATED_LINEAR_ATTN:
2018
+ {
2019
+ ggml_compute_forward_gla(params, tensor);
2020
+ } break;
2021
+ case GGML_OP_RWKV_WKV7:
2022
+ {
2023
+ ggml_compute_forward_rwkv_wkv7(params, tensor);
2024
+ } break;
2025
+ case GGML_OP_MAP_CUSTOM1:
2026
+ {
2027
+ ggml_compute_forward_map_custom1(params, tensor);
2028
+ }
2029
+ break;
2030
+ case GGML_OP_MAP_CUSTOM2:
2031
+ {
2032
+ ggml_compute_forward_map_custom2(params, tensor);
2033
+ }
2034
+ break;
2035
+ case GGML_OP_MAP_CUSTOM3:
2036
+ {
2037
+ ggml_compute_forward_map_custom3(params, tensor);
2038
+ }
2039
+ break;
2040
+ case GGML_OP_CUSTOM:
2041
+ {
2042
+ ggml_compute_forward_custom(params, tensor);
2043
+ }
2044
+ break;
2045
+ case GGML_OP_CROSS_ENTROPY_LOSS:
2046
+ {
2047
+ ggml_compute_forward_cross_entropy_loss(params, tensor);
2048
+ }
2049
+ break;
2050
+ case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
2051
+ {
2052
+ ggml_compute_forward_cross_entropy_loss_back(params, tensor);
2053
+ }
2054
+ break;
2055
+ case GGML_OP_OPT_STEP_ADAMW:
2056
+ {
2057
+ ggml_compute_forward_opt_step_adamw(params, tensor);
2058
+ }
2059
+ break;
2060
+ case GGML_OP_NONE:
2061
+ {
2062
+ // nop
2063
+ } break;
2064
+ case GGML_OP_COUNT:
2065
+ {
2066
+ GGML_ABORT("fatal error");
2067
+ }
2068
+ }
2069
+ }
2070
+
2071
+ // Android's libc implementation "bionic" does not support setting affinity
2072
+ #if defined(__gnu_linux__)
2073
+ static void set_numa_thread_affinity(int thread_n) {
2074
+ if (!ggml_is_numa()) {
2075
+ return;
2076
+ }
2077
+
2078
+ int node_num;
2079
+ int rv;
2080
+ size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
2081
+
2082
+ switch(g_state.numa.numa_strategy) {
2083
+ case GGML_NUMA_STRATEGY_DISTRIBUTE:
2084
+ // distribute: assign this thread to a node round-robin (node_num = thread_n % n_nodes)
2085
+ node_num = thread_n % g_state.numa.n_nodes;
2086
+ break;
2087
+ case GGML_NUMA_STRATEGY_ISOLATE:
2088
+ // run thread on current_node
2089
+ node_num = g_state.numa.current_node;
2090
+ break;
2091
+ case GGML_NUMA_STRATEGY_NUMACTL:
2092
+ // use the cpuset that numactl gave us
2093
+ rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset);
2094
+ if (rv) {
2095
+ fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv));
2096
+ }
2097
+ return;
2098
+ default:
2099
+ return;
2100
+ }
2101
+
2102
+ struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
2103
+
2104
+ cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
2105
+ CPU_ZERO_S(setsize, cpus);
2106
+ for (size_t i = 0; i < node->n_cpus; ++i) {
2107
+ CPU_SET_S(node->cpus[i], setsize, cpus);
2108
+ }
2109
+
2110
+ rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
2111
+ if (rv) {
2112
+ fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
2113
+ }
2114
+
2115
+ CPU_FREE(cpus);
2116
+ }
2117
+
2118
+ static void clear_numa_thread_affinity(void) {
2119
+ if (!ggml_is_numa()) {
2120
+ return;
2121
+ }
2122
+
2123
+ size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
2124
+
2125
+ cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
2126
+ CPU_ZERO_S(setsize, cpus);
2127
+ for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) {
2128
+ CPU_SET_S(i, setsize, cpus);
2129
+ }
2130
+
2131
+ int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
2132
+ if (rv) {
2133
+ fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
2134
+ }
2135
+
2136
+ CPU_FREE(cpus);
2137
+ }
2138
+ #else
2139
+ // TODO: Windows etc.
2140
+ // (the Linux implementation may also work on BSD, someone should test)
2141
+ static void set_numa_thread_affinity(int thread_n) { UNUSED(thread_n); }
2142
+ static void clear_numa_thread_affinity(void) {}
2143
+ #endif
2144
+
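+ // [editor's sketch, not part of this diff] Of the strategies above, DISTRIBUTE
+ // is a plain round-robin over the NUMA nodes. Standalone illustration:
+ #include <stdio.h>
+ int main(void) {
+     const int n_nodes = 2, n_threads = 8;       // hypothetical topology
+     for (int thread_n = 0; thread_n < n_threads; thread_n++) {
+         printf("thread %d -> numa node %d\n", thread_n, thread_n % n_nodes);
+     }
+     return 0;                                   // nodes alternate 0,1,0,1,...
+ }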
2145
+ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) {
2146
+ int n_tasks = 0;
2147
+
2148
+ if (ggml_is_empty(node)) {
2149
+ // no need to multi-thread a no-op
2150
+ n_tasks = 1;
2151
+ return n_tasks;
2152
+ }
2153
+
2154
+ switch (node->op) {
2155
+ case GGML_OP_CPY:
2156
+ case GGML_OP_DUP:
2157
+ case GGML_OP_CONT:
2158
+ case GGML_OP_ADD:
2159
+ case GGML_OP_ADD1:
2160
+ case GGML_OP_ACC:
2161
+ {
2162
+ n_tasks = n_threads;
2163
+ } break;
2164
+ case GGML_OP_SUB:
2165
+ case GGML_OP_SQR:
2166
+ case GGML_OP_SQRT:
2167
+ case GGML_OP_LOG:
2168
+ case GGML_OP_SIN:
2169
+ case GGML_OP_COS:
2170
+ case GGML_OP_SUM:
2171
+ case GGML_OP_SUM_ROWS:
2172
+ case GGML_OP_MEAN:
2173
+ case GGML_OP_ARGMAX:
2174
+ {
2175
+ n_tasks = 1;
2176
+ } break;
2177
+ case GGML_OP_COUNT_EQUAL:
2178
+ {
2179
+ n_tasks = n_threads;
2180
+ } break;
2181
+ case GGML_OP_REPEAT:
2182
+ case GGML_OP_REPEAT_BACK:
2183
+ case GGML_OP_LEAKY_RELU:
2184
+ {
2185
+ n_tasks = 1;
2186
+ } break;
2187
+ case GGML_OP_UNARY:
2188
+ switch (ggml_get_unary_op(node)) {
2189
+ case GGML_UNARY_OP_ABS:
2190
+ case GGML_UNARY_OP_SGN:
2191
+ case GGML_UNARY_OP_NEG:
2192
+ case GGML_UNARY_OP_STEP:
2193
+ case GGML_UNARY_OP_TANH:
2194
+ case GGML_UNARY_OP_ELU:
2195
+ case GGML_UNARY_OP_RELU:
2196
+ case GGML_UNARY_OP_SIGMOID:
2197
+ case GGML_UNARY_OP_HARDSWISH:
2198
+ case GGML_UNARY_OP_HARDSIGMOID:
2199
+ case GGML_UNARY_OP_EXP:
2200
+ {
2201
+ n_tasks = 1;
2202
+ } break;
2203
+
2204
+ case GGML_UNARY_OP_GELU:
2205
+ case GGML_UNARY_OP_GELU_ERF:
2206
+ case GGML_UNARY_OP_GELU_QUICK:
2207
+ case GGML_UNARY_OP_SILU:
2208
+ {
2209
+ n_tasks = n_threads;
2210
+ } break;
2211
+ default:
2212
+ GGML_ABORT("fatal error");
2213
+ }
2214
+ break;
2215
+ case GGML_OP_SILU_BACK:
2216
+ case GGML_OP_MUL:
2217
+ case GGML_OP_DIV:
2218
+ case GGML_OP_NORM:
2219
+ case GGML_OP_RMS_NORM:
2220
+ case GGML_OP_RMS_NORM_BACK:
2221
+ case GGML_OP_L2_NORM:
2222
+ case GGML_OP_GROUP_NORM:
2223
+ case GGML_OP_CONCAT:
2224
+ case GGML_OP_MUL_MAT:
2225
+ case GGML_OP_MUL_MAT_ID:
2226
+ case GGML_OP_OUT_PROD:
2227
+ {
2228
+ n_tasks = n_threads;
2229
+ } break;
2230
+ case GGML_OP_GET_ROWS:
2231
+ {
2232
+ // FIXME: get_rows can use additional threads, but the cost of launching additional threads
2233
+ // decreases performance with GPU offloading
2234
+ //n_tasks = n_threads;
2235
+ n_tasks = 1;
2236
+ } break;
2237
+ case GGML_OP_SCALE:
2238
+ case GGML_OP_SET:
2239
+ case GGML_OP_RESHAPE:
2240
+ case GGML_OP_VIEW:
2241
+ case GGML_OP_PERMUTE:
2242
+ case GGML_OP_TRANSPOSE:
2243
+ case GGML_OP_GET_ROWS_BACK:
2244
+ case GGML_OP_DIAG:
2245
+ {
2246
+ n_tasks = 1;
2247
+ } break;
2248
+ case GGML_OP_DIAG_MASK_ZERO:
2249
+ case GGML_OP_DIAG_MASK_INF:
2250
+ case GGML_OP_SOFT_MAX_BACK:
2251
+ case GGML_OP_ROPE:
2252
+ case GGML_OP_ROPE_BACK:
2253
+ case GGML_OP_ADD_REL_POS:
2254
+ {
2255
+ n_tasks = n_threads;
2256
+ } break;
2257
+ case GGML_OP_CLAMP:
2258
+ {
2259
+ n_tasks = 1; //TODO
2260
+ } break;
2261
+ case GGML_OP_SOFT_MAX:
2262
+ {
2263
+ n_tasks = MIN(n_threads, ggml_nrows(node->src[0]));
2264
+ } break;
2265
+ case GGML_OP_IM2COL:
2266
+ case GGML_OP_IM2COL_BACK:
2267
+ case GGML_OP_CONV_2D_DW:
2268
+ case GGML_OP_CONV_TRANSPOSE_1D:
2269
+ case GGML_OP_CONV_TRANSPOSE_2D:
2270
+ {
2271
+ n_tasks = n_threads;
2272
+ } break;
2273
+ case GGML_OP_POOL_1D:
2274
+ case GGML_OP_POOL_2D:
2275
+ case GGML_OP_POOL_2D_BACK:
2276
+ {
2277
+ n_tasks = 1;
2278
+ } break;
2279
+ case GGML_OP_UPSCALE:
2280
+ case GGML_OP_PAD:
2281
+ case GGML_OP_PAD_REFLECT_1D:
2282
+ case GGML_OP_ARANGE:
2283
+ case GGML_OP_TIMESTEP_EMBEDDING:
2284
+ case GGML_OP_ARGSORT:
2285
+ case GGML_OP_FLASH_ATTN_EXT:
2286
+ case GGML_OP_FLASH_ATTN_BACK:
2287
+ case GGML_OP_SSM_CONV:
2288
+ case GGML_OP_SSM_SCAN:
2289
+ case GGML_OP_RWKV_WKV6:
2290
+ case GGML_OP_GATED_LINEAR_ATTN:
2291
+ case GGML_OP_RWKV_WKV7:
2292
+ {
2293
+ n_tasks = n_threads;
2294
+ } break;
2295
+ case GGML_OP_WIN_PART:
2296
+ case GGML_OP_WIN_UNPART:
2297
+ case GGML_OP_GET_REL_POS:
2298
+ {
2299
+ n_tasks = 1;
2300
+ } break;
2301
+ case GGML_OP_MAP_CUSTOM1:
2302
+ {
2303
+ struct ggml_map_custom1_op_params p;
2304
+ memcpy(&p, node->op_params, sizeof(p));
2305
+ if (p.n_tasks == GGML_N_TASKS_MAX) {
2306
+ n_tasks = n_threads;
2307
+ } else {
2308
+ n_tasks = MIN(p.n_tasks, n_threads);
2309
+ }
2310
+ } break;
2311
+ case GGML_OP_MAP_CUSTOM2:
2312
+ {
2313
+ struct ggml_map_custom2_op_params p;
2314
+ memcpy(&p, node->op_params, sizeof(p));
2315
+ if (p.n_tasks == GGML_N_TASKS_MAX) {
2316
+ n_tasks = n_threads;
2317
+ } else {
2318
+ n_tasks = MIN(p.n_tasks, n_threads);
2319
+ }
2320
+ } break;
2321
+ case GGML_OP_MAP_CUSTOM3:
2322
+ {
2323
+ struct ggml_map_custom3_op_params p;
2324
+ memcpy(&p, node->op_params, sizeof(p));
2325
+ if (p.n_tasks == GGML_N_TASKS_MAX) {
2326
+ n_tasks = n_threads;
2327
+ } else {
2328
+ n_tasks = MIN(p.n_tasks, n_threads);
2329
+ }
2330
+ } break;
2331
+ case GGML_OP_CUSTOM:
2332
+ {
2333
+ struct ggml_custom_op_params p;
2334
+ memcpy(&p, node->op_params, sizeof(p));
2335
+ if (p.n_tasks == GGML_N_TASKS_MAX) {
2336
+ n_tasks = n_threads;
2337
+ } else {
2338
+ n_tasks = MIN(p.n_tasks, n_threads);
2339
+ }
2340
+ } break;
2341
+ case GGML_OP_CROSS_ENTROPY_LOSS:
2342
+ case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
2343
+ case GGML_OP_OPT_STEP_ADAMW:
2344
+ {
2345
+ n_tasks = n_threads;
2346
+ } break;
2347
+ case GGML_OP_NONE:
2348
+ {
2349
+ n_tasks = 1;
2350
+ } break;
2351
+ case GGML_OP_COUNT:
2352
+ {
2353
+ GGML_ABORT("fatal error");
2354
+ }
2355
+ default:
2356
+ {
2357
+ fprintf(stderr, "%s: op not implemented: ", __func__);
2358
+ if (node->op < GGML_OP_COUNT) {
2359
+ fprintf(stderr, "%s\n", ggml_op_name(node->op));
2360
+ } else {
2361
+ fprintf(stderr, "%d\n", node->op);
2362
+ }
2363
+ GGML_ABORT("fatal error");
2364
+ }
2365
+ }
2366
+
2367
+ assert(n_tasks > 0);
2368
+
2369
+ return n_tasks;
2370
+ }
2371
+
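+ // [editor's sketch, not part of this diff] The GGML_OP_MAP_CUSTOM*/GGML_OP_CUSTOM
+ // branches above all reduce to the same clamp; GGML_N_TASKS_MAX (defined as -1
+ // in ggml.h) means "use as many threads as available". Condensed standalone form:
+ #include <stdio.h>
+ #define MIN(a, b) ((a) < (b) ? (a) : (b))
+ #define N_TASKS_MAX (-1)                        // local stand-in for GGML_N_TASKS_MAX
+ static int clamp_tasks(int requested, int n_threads) {
+     return requested == N_TASKS_MAX ? n_threads : MIN(requested, n_threads);
+ }
+ int main(void) {
+     printf("%d %d %d\n", clamp_tasks(N_TASKS_MAX, 8),  // 8: take all threads
+                          clamp_tasks(4, 8),            // 4: honor the request
+                          clamp_tasks(16, 8));          // 8: clamp to available
+     return 0;
+ }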
2372
+ static thread_ret_t ggml_graph_compute_secondary_thread(void* data);
2373
+
2374
+ #if defined(_WIN32)
2375
+ #include "windows.h"
2376
+
2377
+ // TODO: support > 64 CPUs
2378
+ static bool ggml_thread_apply_affinity(bool * mask) {
2379
+ HANDLE h = GetCurrentThread();
2380
+ uint64_t bitmask = 0ULL;
2381
+
2382
+ assert(GGML_MAX_N_THREADS >= 64);
2383
+
2384
+ for (int32_t i = 0; i < 8; i++) {
2385
+ int32_t idx = i * 8;
2386
+ uint8_t val = 0;
2387
+ val |= mask[idx + 0] << 0;
2388
+ val |= mask[idx + 1] << 1;
2389
+ val |= mask[idx + 2] << 2;
2390
+ val |= mask[idx + 3] << 3;
2391
+ val |= mask[idx + 4] << 4;
2392
+ val |= mask[idx + 5] << 5;
2393
+ val |= mask[idx + 6] << 6;
2394
+ val |= mask[idx + 7] << 7;
2395
+ bitmask |= (uint64_t)val << idx;
2396
+ }
2397
+
2398
+ for (int32_t i = 64; i < GGML_MAX_N_THREADS; i++) {
2399
+ if (mask[i]) {
2400
+ fprintf(stderr, "warn: setting thread-affinity for > 64 CPUs isn't supported on windows!\n");
2401
+ break;
2402
+ }
2403
+ }
2404
+
2405
+ DWORD_PTR m = (DWORD_PTR)bitmask;
2406
+
2407
+ m = SetThreadAffinityMask(h, m);
2408
+
2409
+ return m != 0;
2410
+ }
2411
+
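+ // [editor's sketch, not part of this diff] The packing loop above folds a
+ // bool-per-CPU mask into the single 64-bit affinity word Windows expects.
+ // Standalone equivalent:
+ #include <stdbool.h>
+ #include <stdint.h>
+ #include <stdio.h>
+ int main(void) {
+     bool mask[64] = { false };
+     mask[0] = mask[3] = mask[32] = true;        // allow CPUs 0, 3 and 32
+     uint64_t bitmask = 0;
+     for (int i = 0; i < 64; i++) {
+         bitmask |= (uint64_t)(mask[i] ? 1 : 0) << i;
+     }
+     printf("0x%016llx\n", (unsigned long long) bitmask); // 0x0000000100000009
+     return 0;
+ }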
2412
+ static bool ggml_thread_apply_priority(int32_t prio) {
2413
+ // Note that on Windows the Process Priority Class must be updated in order to set Thread priority.
2414
+ // This is left up to the application.
2415
+ DWORD p = THREAD_PRIORITY_NORMAL;
2416
+ switch (prio) {
2417
+ case GGML_SCHED_PRIO_NORMAL: p = THREAD_PRIORITY_NORMAL; break;
2418
+ case GGML_SCHED_PRIO_MEDIUM: p = THREAD_PRIORITY_ABOVE_NORMAL; break;
2419
+ case GGML_SCHED_PRIO_HIGH: p = THREAD_PRIORITY_HIGHEST; break;
2420
+ case GGML_SCHED_PRIO_REALTIME: p = THREAD_PRIORITY_TIME_CRITICAL; break;
2421
+ }
2422
+
2423
+ if (prio == GGML_SCHED_PRIO_NORMAL) {
2424
+ // Keep inherited policy/priority
2425
+ return true;
2426
+ }
2427
+
2428
+ if (!SetThreadPriority(GetCurrentThread(), p)) {
2429
+ fprintf(stderr, "warn: failed to set thread priority %d : (%d)\n", prio, (int) GetLastError());
2430
+ return false;
2431
+ }
2432
+
2433
+ return true;
2434
+ }
2435
+
2436
+ #elif defined(__APPLE__)
2437
+ #include <sys/types.h>
2438
+ #include <sys/resource.h>
2439
+
2440
+ static bool ggml_thread_apply_affinity(const bool * mask) {
2441
+ // Not supported on Apple platforms
2442
+ UNUSED(mask);
2443
+ return true;
2444
+ }
2445
+
2446
+ static bool ggml_thread_apply_priority(int32_t prio) {
2447
+ struct sched_param p;
2448
+ int32_t policy = SCHED_OTHER;
2449
+ switch (prio) {
2450
+ case GGML_SCHED_PRIO_NORMAL: policy = SCHED_OTHER; p.sched_priority = 0; break;
2451
+ case GGML_SCHED_PRIO_MEDIUM: policy = SCHED_FIFO; p.sched_priority = 40; break;
2452
+ case GGML_SCHED_PRIO_HIGH: policy = SCHED_FIFO; p.sched_priority = 80; break;
2453
+ case GGML_SCHED_PRIO_REALTIME: policy = SCHED_FIFO; p.sched_priority = 90; break;
2454
+ }
2455
+
2456
+ if (prio == GGML_SCHED_PRIO_NORMAL) {
2457
+ // Keep inherited policy/priority
2458
+ return true;
2459
+ }
2460
+
2461
+ int32_t err = pthread_setschedparam(pthread_self(), policy, &p);
2462
+ if (err != 0) {
2463
+ fprintf(stderr, "warn: failed to set thread priority %d : %s (%d)\n", prio, strerror(err), err);
2464
+ return false;
2465
+ }
2466
+
2467
+ return true;
2468
+ }
2469
+
2470
+ #elif defined(__gnu_linux__)
2471
+ // TODO: this may not work on BSD, to be verified
2472
+
2473
+ static bool ggml_thread_apply_affinity(const bool * mask) {
2474
+ cpu_set_t cpuset;
2475
+ int err;
2476
+
2477
+ CPU_ZERO(&cpuset);
2478
+
2479
+ for (uint32_t i = 0; i < GGML_MAX_N_THREADS; i++) {
2480
+ if (mask[i]) {
2481
+ GGML_PRINT_DEBUG("Thread %lx: adding %d to cpuset\n", pthread_self(), i);
2482
+ CPU_SET(i, &cpuset);
2483
+ }
2484
+ }
2485
+
2486
+ #ifdef __ANDROID__
2487
+ err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
2488
+ if (err < 0) {
2489
+ err = errno;
2490
+ }
2491
+ #else
2492
+ err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
2493
+ #endif
2494
+ if (err != 0) {
2495
+ fprintf(stderr, "warn: failed to set affinity mask 0x%llx : %s (%d)\n", (unsigned long long)mask, strerror(err), err);
2496
+ return false;
2497
+ }
2498
+
2499
+ return true;
2500
+ }
2501
+
2502
+ static bool ggml_thread_apply_priority(int32_t prio) {
2503
+ struct sched_param p;
2504
+ int32_t policy = SCHED_OTHER;
2505
+ switch (prio) {
2506
+ case GGML_SCHED_PRIO_NORMAL: policy = SCHED_OTHER; p.sched_priority = 0; break;
2507
+ case GGML_SCHED_PRIO_MEDIUM: policy = SCHED_FIFO; p.sched_priority = 40; break;
2508
+ case GGML_SCHED_PRIO_HIGH: policy = SCHED_FIFO; p.sched_priority = 80; break;
2509
+ case GGML_SCHED_PRIO_REALTIME: policy = SCHED_FIFO; p.sched_priority = 90; break;
2510
+ }
2511
+
2512
+ if (prio == GGML_SCHED_PRIO_NORMAL) {
2513
+ // Keep inherited policy/priority
2514
+ return true;
2515
+ }
2516
+
2517
+ int32_t err = pthread_setschedparam(pthread_self(), policy, &p);
2518
+ if (err != 0) {
2519
+ fprintf(stderr, "warn: failed to set thread priority %d : %s (%d)\n", prio, strerror(err), err);
2520
+ return false;
2521
+ }
2522
+
2523
+ return true;
2524
+ }
2525
+
2526
+ #else // unsupported platforms
2527
+
2528
+ static bool ggml_thread_apply_affinity(const bool * mask) {
2529
+ UNUSED(mask);
2530
+ return true;
2531
+ }
2532
+
2533
+ static bool ggml_thread_apply_priority(int32_t prio) {
2534
+ UNUSED(prio);
2535
+ return true;
2536
+ }
2537
+
2538
+ #endif
2539
+
2540
+ static bool ggml_thread_cpumask_is_valid(const bool * mask) {
2541
+ for (int i = 0; i < GGML_MAX_N_THREADS; i++) {
2542
+ if (mask[i]) { return true; }
2543
+ }
2544
+ return false;
2545
+ }
2546
+
2547
+ static void ggml_thread_cpumask_next(const bool * global_mask, bool * local_mask, bool strict, int32_t* iter) {
2548
+ if (!strict) {
2549
+ memcpy(local_mask, global_mask, GGML_MAX_N_THREADS);
2550
+ return;
2551
+ } else {
2552
+ memset(local_mask, 0, GGML_MAX_N_THREADS);
2553
+ int32_t base_idx = *iter;
2554
+ for (int32_t i = 0; i < GGML_MAX_N_THREADS; i++) {
2555
+ int32_t idx = base_idx + i;
2556
+ if (idx >= GGML_MAX_N_THREADS) {
2557
+ // Just a cheaper modulo
2558
+ idx -= GGML_MAX_N_THREADS;
2559
+ }
2560
+ if (global_mask[idx]) {
2561
+ local_mask[idx] = 1;
2562
+ *iter = idx + 1;
2563
+ return;
2564
+ }
2565
+ }
2566
+ }
2567
+ }
2568
+
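+ // [editor's sketch, not part of this diff] In strict mode the function above
+ // hands each new thread the next set CPU, cycling through the global mask.
+ // A shrunken standalone replica of that behavior:
+ #include <stdbool.h>
+ #include <stdio.h>
+ #include <string.h>
+ #define NMAX 8                                  // stand-in for GGML_MAX_N_THREADS
+ static void cpumask_next_demo(const bool * global_mask, bool * local_mask, int * iter) {
+     memset(local_mask, 0, NMAX);
+     for (int i = 0; i < NMAX; i++) {
+         int idx = (*iter + i) % NMAX;
+         if (global_mask[idx]) { local_mask[idx] = true; *iter = idx + 1; return; }
+     }
+ }
+ int main(void) {
+     bool global[NMAX] = { false };
+     global[2] = global[5] = true;               // only CPUs 2 and 5 are allowed
+     int iter = 0;
+     for (int t = 0; t < 3; t++) {
+         bool local[NMAX];
+         cpumask_next_demo(global, local, &iter);
+         for (int i = 0; i < NMAX; i++) {
+             if (local[i]) printf("thread %d -> cpu %d\n", t, i); // 2, 5, then 2 again
+         }
+     }
+     return 0;
+ }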
2569
+ void ggml_threadpool_free(struct ggml_threadpool* threadpool) {
2570
+ if (!threadpool) return;
2571
+
2572
+ const int n_threads = threadpool->n_threads_max;
2573
+
2574
+ #ifndef GGML_USE_OPENMP
2575
+ struct ggml_compute_state* workers = threadpool->workers;
2576
+
2577
+ ggml_mutex_lock(&threadpool->mutex);
2578
+
2579
+ threadpool->stop = true;
2580
+ threadpool->pause = false;
2581
+
2582
+ ggml_cond_broadcast(&threadpool->cond);
2583
+ ggml_mutex_unlock(&threadpool->mutex);
2584
+
2585
+ for (int j = 1; j < n_threads; j++) {
2586
+ int32_t rc = ggml_thread_join(workers[j].thrd, NULL);
2587
+ GGML_ASSERT(rc == GGML_EXIT_SUCCESS || rc == GGML_EXIT_ABORTED);
2588
+ UNUSED(rc);
2589
+ }
2590
+
2591
+ ggml_mutex_destroy(&threadpool->mutex);
2592
+ ggml_cond_destroy(&threadpool->cond);
2593
+ #endif // GGML_USE_OPENMP
2594
+
2595
+ const size_t workers_size = sizeof(struct ggml_compute_state) * n_threads;
2596
+ ggml_aligned_free(threadpool->workers, workers_size);
2597
+ ggml_aligned_free(threadpool, sizeof(struct ggml_threadpool));
2598
+ }
2599
+
2600
+ #ifndef GGML_USE_OPENMP
2601
+ // pause/resume must be called under mutex
2602
+ static void ggml_threadpool_pause_locked(struct ggml_threadpool * threadpool) {
2603
+ GGML_PRINT_DEBUG("Pausing threadpool\n");
2604
+ threadpool->pause = true;
2605
+ ggml_cond_broadcast(&threadpool->cond);
2606
+ }
2607
+
2608
+ static void ggml_threadpool_resume_locked(struct ggml_threadpool * threadpool) {
2609
+ GGML_PRINT_DEBUG("Resuming threadpool\n");
2610
+ threadpool->pause = false;
2611
+ ggml_cond_broadcast(&threadpool->cond);
2612
+ }
2613
+ #endif
2614
+
2615
+ void ggml_threadpool_pause(struct ggml_threadpool * threadpool) {
2616
+ #ifndef GGML_USE_OPENMP
2617
+ ggml_mutex_lock(&threadpool->mutex);
2618
+ if (!threadpool->pause) {
2619
+ ggml_threadpool_pause_locked(threadpool);
2620
+ }
2621
+ ggml_mutex_unlock(&threadpool->mutex);
2622
+ #else
2623
+ UNUSED(threadpool);
2624
+ #endif
2625
+ }
2626
+
2627
+ void ggml_threadpool_resume(struct ggml_threadpool * threadpool) {
2628
+ #ifndef GGML_USE_OPENMP
2629
+ ggml_mutex_lock(&threadpool->mutex);
2630
+ if (threadpool->pause) {
2631
+ ggml_threadpool_resume_locked(threadpool);
2632
+ }
2633
+ ggml_mutex_unlock(&threadpool->mutex);
2634
+ #else
2635
+ UNUSED(threadpool);
2636
+ #endif
2637
+ }
2638
+
2639
+ struct ggml_cplan ggml_graph_plan(
2640
+ const struct ggml_cgraph * cgraph,
2641
+ int n_threads,
2642
+ struct ggml_threadpool * threadpool) {
2643
+
2644
+ if (threadpool == NULL) {
2645
+ //GGML_PRINT_DEBUG("Threadpool is not specified. Will create a disposable threadpool : n_threads %d\n", n_threads);
2646
+ }
2647
+ if (n_threads <= 0) {
2648
+ n_threads = threadpool ? threadpool->n_threads_max : GGML_DEFAULT_N_THREADS;
2649
+ }
2650
+
2651
+ size_t work_size = 0;
2652
+
2653
+ struct ggml_cplan cplan;
2654
+ memset(&cplan, 0, sizeof(struct ggml_cplan));
2655
+
2656
+ int max_tasks = 1;
2657
+
2658
+ // thread scheduling for the different operations + work buffer size estimation
2659
+ for (int i = 0; i < cgraph->n_nodes; i++) {
2660
+ struct ggml_tensor * node = cgraph->nodes[i];
2661
+
2662
+ const int n_tasks = ggml_get_n_tasks(node, n_threads);
2663
+
2664
+ max_tasks = MAX(max_tasks, n_tasks);
2665
+
2666
+ size_t cur = 0;
2667
+
2668
+ if (!ggml_cpu_extra_work_size(n_threads, node, &cur)) {
2669
+ switch (node->op) {
2670
+ case GGML_OP_CPY:
2671
+ case GGML_OP_DUP:
2672
+ {
2673
+ if (ggml_is_quantized(node->type) ||
2674
+ // F16 -> BF16 and BF16 -> F16 copies go through intermediate F32
2675
+ (node->src[0]->type == GGML_TYPE_F16 && node->src[1] && node->src[1]->type == GGML_TYPE_BF16) ||
2676
+ (node->src[0]->type == GGML_TYPE_BF16 && node->src[1] && node->src[1]->type == GGML_TYPE_F16)) {
2677
+ cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
2678
+ }
2679
+ } break;
2680
+ case GGML_OP_ADD:
2681
+ case GGML_OP_ADD1:
2682
+ {
2683
+ if (ggml_is_quantized(node->src[0]->type)) {
2684
+ cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
2685
+ }
2686
+ } break;
2687
+ case GGML_OP_ACC:
2688
+ {
2689
+ if (ggml_is_quantized(node->src[0]->type)) {
2690
+ cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks;
2691
+ }
2692
+ } break;
2693
+ case GGML_OP_COUNT_EQUAL:
2694
+ {
2695
+ cur = ggml_type_size(node->type)*n_tasks;
2696
+ } break;
2697
+ case GGML_OP_MUL_MAT:
2698
+ {
2699
+ const enum ggml_type vec_dot_type = type_traits_cpu[node->src[0]->type].vec_dot_type;
2700
+
2701
+ if (node->src[1]->type != vec_dot_type) {
2702
+ cur = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1]));
2703
+ }
2704
+ } break;
2705
+ case GGML_OP_MUL_MAT_ID:
2706
+ {
2707
+ cur = 0;
2708
+ const struct ggml_tensor * src0 = node->src[0];
2709
+ const struct ggml_tensor * src1 = node->src[1];
2710
+ const struct ggml_tensor * ids = node->src[2];
2711
+ const enum ggml_type vec_dot_type = type_traits_cpu[src0->type].vec_dot_type;
2712
+ const int n_as = src0->ne[2];
2713
+ // src1
2714
+ if (src1->type != vec_dot_type) {
2715
+ cur += ggml_row_size(vec_dot_type, ggml_nelements(src1)) + sizeof(int64_t);
2716
+ }
2717
+ // matrix_row_counts
2718
+ cur += n_as * sizeof(int64_t) + sizeof(int64_t);
2719
+ // matrix_rows
2720
+ cur += n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping) + sizeof(int64_t);
2721
+ // atomic_current_chunk
2722
+ cur += CACHE_LINE_SIZE*n_as + CACHE_LINE_SIZE;
2723
+ } break;
2724
+ case GGML_OP_OUT_PROD:
2725
+ {
2726
+ if (ggml_is_quantized(node->src[0]->type)) {
2727
+ cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
2728
+ }
2729
+ } break;
2730
+ case GGML_OP_SOFT_MAX:
2731
+ case GGML_OP_ROPE:
2732
+ case GGML_OP_ROPE_BACK:
2733
+ {
2734
+ cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
2735
+ } break;
2736
+ case GGML_OP_CONV_TRANSPOSE_1D:
2737
+ {
2738
+ GGML_ASSERT(node->src[0]->ne[3] == 1);
2739
+ GGML_ASSERT(node->src[1]->ne[2] == 1);
2740
+ GGML_ASSERT(node->src[1]->ne[3] == 1);
2741
+
2742
+ const int64_t ne00 = node->src[0]->ne[0]; // K
2743
+ const int64_t ne01 = node->src[0]->ne[1]; // Cout
2744
+ const int64_t ne02 = node->src[0]->ne[2]; // Cin
2745
+ const int64_t ne10 = node->src[1]->ne[0]; // L
2746
+ const int64_t ne11 = node->src[1]->ne[1]; // Cin
2747
+
2748
+ if ((node->src[0]->type == GGML_TYPE_F16 ||
2749
+ node->src[0]->type == GGML_TYPE_BF16) &&
2750
+ node->src[1]->type == GGML_TYPE_F32) {
2751
+ cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02;
2752
+ cur += sizeof(ggml_fp16_t)*ne10*ne11;
2753
+ } else if (node->src[0]->type == GGML_TYPE_F32 &&
2754
+ node->src[1]->type == GGML_TYPE_F32) {
2755
+ cur += sizeof(float)*ne00*ne01*ne02;
2756
+ cur += sizeof(float)*ne10*ne11;
2757
+ } else {
2758
+ GGML_ABORT("fatal error");
2759
+ }
2760
+ } break;
2761
+ case GGML_OP_CONV_TRANSPOSE_2D:
2762
+ {
2763
+ const int64_t ne00 = node->src[0]->ne[0]; // W
2764
+ const int64_t ne01 = node->src[0]->ne[1]; // H
2765
+ const int64_t ne02 = node->src[0]->ne[2]; // Channels Out
2766
+ const int64_t ne03 = node->src[0]->ne[3]; // Channels In
2767
+
2768
+ const int64_t ne10 = node->src[1]->ne[0]; // W
2769
+ const int64_t ne11 = node->src[1]->ne[1]; // H
2770
+ const int64_t ne12 = node->src[1]->ne[2]; // Channels In
2771
+
2772
+ cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03;
2773
+ cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12;
2774
+ } break;
2775
+ case GGML_OP_FLASH_ATTN_EXT:
2776
+ {
2777
+ const int64_t ne10 = node->src[1]->ne[0]; // DK
2778
+ const int64_t ne20 = node->src[2]->ne[0]; // DV
2779
+
2780
+ cur = sizeof(float)*(1*ne10 + 2*ne20)*n_tasks; // 1x head size K + 2x head size V (per thread)
2781
+ } break;
2782
+ case GGML_OP_FLASH_ATTN_BACK:
2783
+ {
2784
+ const int64_t D = node->src[0]->ne[0];
2785
+ const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
2786
+ const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back
2787
+ if (node->src[1]->type == GGML_TYPE_F32) {
2788
+ cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
2789
+ cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
2790
+ } else if (node->src[1]->type == GGML_TYPE_F16) {
2791
+ cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
2792
+ cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
2793
+ } else if (node->src[1]->type == GGML_TYPE_BF16) {
2794
+ cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
2795
+ cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
2796
+ }
2797
+ } break;
2798
+
2799
+ case GGML_OP_CROSS_ENTROPY_LOSS:
2800
+ {
2801
+ cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);
2802
+ } break;
2803
+ case GGML_OP_COUNT:
2804
+ {
2805
+ GGML_ABORT("fatal error");
2806
+ }
2807
+ default:
2808
+ break;
2809
+ }
2810
+ }
2811
+
2812
+ work_size = MAX(work_size, cur);
2813
+ }
2814
+
2815
+ if (work_size > 0) {
2816
+ work_size += CACHE_LINE_SIZE*(n_threads);
2817
+ }
2818
+
2819
+ cplan.threadpool = threadpool;
2820
+ cplan.n_threads = MIN(max_tasks, n_threads);
2821
+ cplan.work_size = work_size;
2822
+ cplan.work_data = NULL;
2823
+
2824
+ return cplan;
2825
+ }
2826
+
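+ // [editor's sketch, not part of this diff; assumes the public ggml / ggml-cpu
+ // headers] Typical manual use of ggml_graph_plan: size the work buffer, hand
+ // it to the plan, then compute. ggml_graph_compute_with_ctx below automates this.
+ #include <stdlib.h>
+ #include "ggml.h"
+ #include "ggml-cpu.h"
+ static enum ggml_status compute_graph(struct ggml_cgraph * gf) {
+     struct ggml_cplan plan = ggml_graph_plan(gf, /*n_threads=*/4, /*threadpool=*/NULL);
+     if (plan.work_size > 0) {
+         plan.work_data = malloc(plan.work_size); // caller owns the work buffer
+     }
+     enum ggml_status st = ggml_graph_compute(gf, &plan);
+     free(plan.work_data);
+     return st;                                   // GGML_STATUS_SUCCESS on success
+ }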
2827
+ static thread_ret_t ggml_graph_compute_thread(void * data) {
2828
+ struct ggml_compute_state * state = (struct ggml_compute_state *) data;
2829
+ struct ggml_threadpool * tp = state->threadpool;
2830
+
2831
+ const struct ggml_cgraph * cgraph = tp->cgraph;
2832
+ const struct ggml_cplan * cplan = tp->cplan;
2833
+
2834
+ set_numa_thread_affinity(state->ith);
2835
+
2836
+ struct ggml_compute_params params = {
2837
+ /*.ith =*/ state->ith,
2838
+ /*.nth =*/ atomic_load_explicit(&tp->n_threads_cur, memory_order_relaxed),
2839
+ /*.wsize =*/ cplan->work_size,
2840
+ /*.wdata =*/ cplan->work_data,
2841
+ /*.threadpool=*/ tp,
2842
+ };
2843
+
2844
+ for (int node_n = 0; node_n < cgraph->n_nodes && atomic_load_explicit(&tp->abort, memory_order_relaxed) != node_n; node_n++) {
2845
+ struct ggml_tensor * node = cgraph->nodes[node_n];
2846
+
2847
+ ggml_compute_forward(&params, node);
2848
+
2849
+ if (state->ith == 0 && cplan->abort_callback &&
2850
+ cplan->abort_callback(cplan->abort_callback_data)) {
2851
+ atomic_store_explicit(&tp->abort, node_n + 1, memory_order_relaxed);
2852
+ tp->ec = GGML_STATUS_ABORTED;
2853
+ }
2854
+
2855
+ if (node_n + 1 < cgraph->n_nodes) {
2856
+ ggml_barrier(state->threadpool);
2857
+ }
2858
+ }
2859
+
2860
+ ggml_barrier(state->threadpool);
2861
+
2862
+ return 0;
2863
+ }
2864
+
2865
+ #ifndef GGML_USE_OPENMP
2866
+
2867
+ // check if thread is active
2868
+ static inline bool ggml_graph_compute_thread_active(struct ggml_compute_state * state) {
2869
+ struct ggml_threadpool * threadpool = state->threadpool;
2870
+ int n_threads = atomic_load_explicit(&threadpool->n_threads_cur, memory_order_relaxed);
2871
+ return (state->ith < n_threads);
2872
+ }
2873
+
2874
+ // check if thread is ready to proceed (exit from polling or sleeping)
2875
+ static inline bool ggml_graph_compute_thread_ready(struct ggml_compute_state * state) {
2876
+ struct ggml_threadpool * threadpool = state->threadpool;
2877
+
2878
+ if (state->pending || threadpool->stop || threadpool->pause) { return true; }
2879
+
2880
+ // check for new graph/work
2881
+ int new_graph = atomic_load_explicit(&threadpool->n_graph, memory_order_relaxed);
2882
+ if (new_graph != state->last_graph) {
2883
+ state->pending = ggml_graph_compute_thread_active(state);
2884
+ state->last_graph = new_graph;
2885
+ }
2886
+
2887
+ return state->pending;
2888
+ }
2889
+
2890
+ // sync thread state after polling
2891
+ static inline void ggml_graph_compute_thread_sync(struct ggml_compute_state * state) {
2892
+ // TSAN doesn't support standalone fences yet, so we use a dummy read-modify-write instead
2893
+ #ifdef GGML_TSAN_ENABLED
2894
+ atomic_fetch_add_explicit(&state->threadpool->n_graph, 0, memory_order_seq_cst);
2895
+ #else
2896
+ atomic_thread_fence(memory_order_seq_cst);
2897
+ #endif
2898
+ UNUSED(state);
2899
+ }
2900
+
2901
+ static inline bool ggml_graph_compute_poll_for_work(struct ggml_compute_state * state) {
2902
+ struct ggml_threadpool * threadpool = state->threadpool;
2903
+
2904
+ // Skip polling for unused threads
2905
+ if (!ggml_graph_compute_thread_active(state)) {
2906
+ return state->pending;
2907
+ }
2908
+
2909
+ // This seems to make 0 ... 100 a decent range for polling level across modern processors.
2910
+ // Perhaps we could adjust it dynamically based on load.
2911
+ const uint64_t n_rounds = 1024UL * 128 * threadpool->poll;
2912
+
2913
+ for (uint64_t i=0; !ggml_graph_compute_thread_ready(state) && i < n_rounds; i++) {
2914
+ // No new work. Keep polling.
2915
+ ggml_thread_cpu_relax();
2916
+ }
2917
+
2918
+ return state->pending;
2919
+ }
2920
+
2921
+ static inline bool ggml_graph_compute_check_for_work(struct ggml_compute_state * state) {
2922
+ struct ggml_threadpool * threadpool = state->threadpool;
2923
+
2924
+ if (ggml_graph_compute_poll_for_work(state)) {
2925
+ ggml_graph_compute_thread_sync(state);
2926
+ return state->pending;
2927
+ }
2928
+
2929
+ ggml_mutex_lock_shared(&threadpool->mutex);
2930
+ while (!ggml_graph_compute_thread_ready(state)) {
2931
+ // No new work. Wait for the signal.
2932
+ GGML_PRINT_DEBUG("thread #%d waiting for work (sleeping)\n", state->ith);
2933
+ ggml_cond_wait(&threadpool->cond, &threadpool->mutex);
2934
+ }
2935
+ ggml_mutex_unlock_shared(&threadpool->mutex);
2936
+
2937
+ return state->pending;
2938
+ }
2939
+
2940
+ static thread_ret_t ggml_graph_compute_secondary_thread(void* data) {
2941
+ struct ggml_compute_state * state = (struct ggml_compute_state *) data;
2942
+ struct ggml_threadpool * threadpool = state->threadpool;
2943
+
2944
+ ggml_thread_apply_priority(threadpool->prio);
2945
+ if (ggml_thread_cpumask_is_valid(state->cpumask)) {
2946
+ ggml_thread_apply_affinity(state->cpumask);
2947
+ }
2948
+
2949
+ while (true) {
2950
+ // Check if we need to sleep
2951
+ while (threadpool->pause) {
2952
+ GGML_PRINT_DEBUG("thread #%d inside pause loop\n", state->ith);
2953
+ ggml_mutex_lock_shared(&threadpool->mutex);
2954
+ if (threadpool->pause) {
2955
+ ggml_cond_wait(&threadpool->cond, &threadpool->mutex);
2956
+ }
2957
+ GGML_PRINT_DEBUG("thread #%d resuming after wait\n", state->ith);
2958
+ ggml_mutex_unlock_shared(&threadpool->mutex);
2959
+ }
2960
+
2961
+ // This needs to be re-checked after the cond_wait
2962
+ if (threadpool->stop) break;
2963
+
2964
+ // Check if there is new work
2965
+ // The main thread is the only one that can dispatch new work
2966
+
2967
+ ggml_graph_compute_check_for_work(state);
2968
+ if (state->pending) {
2969
+ state->pending = false;
2970
+
2971
+ ggml_graph_compute_thread(state);
2972
+ }
2973
+ }
2974
+
2975
+ return (thread_ret_t) 0;
2976
+ }
2977
+
2978
+ // Start processing new graph
2979
+ static void ggml_graph_compute_kickoff(struct ggml_threadpool * threadpool, int n_threads)
2980
+ {
2981
+ // Always take the mutex here because the worker threads are doing hybrid poll/wait
2982
+
2983
+ ggml_mutex_lock(&threadpool->mutex);
2984
+
2985
+ GGML_PRINT_DEBUG("threadpool: n_threads_cur %d n_threads %d\n", threadpool->n_threads_cur, n_threads);
2986
+
2987
+ // Update the number of active threads
2988
+ atomic_store_explicit(&threadpool->n_threads_cur, n_threads, memory_order_relaxed);
2989
+
2990
+ // Indicate the graph is ready to be processed
2991
+ // We need the full seq-cst fence here because of the polling threads (used in thread_sync)
2992
+ atomic_fetch_add_explicit(&threadpool->n_graph, 1, memory_order_seq_cst);
2993
+
2994
+ if (threadpool->pause) {
2995
+ // Update main thread prio and affinity to match the threadpool settings
2996
+ ggml_thread_apply_priority(threadpool->prio);
2997
+ if (ggml_thread_cpumask_is_valid(threadpool->workers[0].cpumask)) {
2998
+ ggml_thread_apply_affinity(threadpool->workers[0].cpumask);
2999
+ }
3000
+
3001
+ // resume does cond broadcast
3002
+ ggml_threadpool_resume_locked(threadpool);
3003
+ } else {
3004
+ ggml_cond_broadcast(&threadpool->cond);
3005
+ }
3006
+
3007
+ ggml_mutex_unlock(&threadpool->mutex);
3008
+ }
3009
+
3010
+ #endif // GGML_USE_OPENMP
3011
+
3012
+ static struct ggml_threadpool * ggml_threadpool_new_impl(
3013
+ struct ggml_threadpool_params * tpp,
3014
+ struct ggml_cgraph * cgraph,
3015
+ struct ggml_cplan * cplan) {
3016
+
3017
+ struct ggml_threadpool * threadpool =
3018
+ ggml_aligned_malloc(sizeof(struct ggml_threadpool));
3019
+ {
3020
+ threadpool->cgraph = cgraph;
3021
+ threadpool->cplan = cplan;
3022
+ threadpool->n_graph = 0;
3023
+ threadpool->n_barrier = 0;
3024
+ threadpool->n_barrier_passed = 0;
3025
+ threadpool->current_chunk = 0;
3026
+ threadpool->stop = false;
3027
+ threadpool->pause = tpp->paused;
3028
+ threadpool->abort = -1;
3029
+ threadpool->workers = NULL;
3030
+ threadpool->n_threads_max = tpp->n_threads;
3031
+ threadpool->n_threads_cur = tpp->n_threads;
3032
+ threadpool->poll = tpp->poll;
3033
+ threadpool->prio = tpp->prio;
3034
+ threadpool->ec = GGML_STATUS_SUCCESS;
3035
+ }
3036
+
3037
+ // Allocate and init workers state
3038
+ const size_t workers_size = sizeof(struct ggml_compute_state) * tpp->n_threads;
3039
+ struct ggml_compute_state * workers = ggml_aligned_malloc(workers_size);
3040
+
3041
+ memset(workers, 0, workers_size);
3042
+ for (int j = 0; j < tpp->n_threads; j++) {
3043
+ workers[j].threadpool = threadpool;
3044
+ workers[j].ith = j;
3045
+ }
3046
+
3047
+ threadpool->workers = workers;
3048
+
3049
+ #ifndef GGML_USE_OPENMP
3050
+ ggml_mutex_init(&threadpool->mutex);
3051
+ ggml_cond_init(&threadpool->cond);
3052
+
3053
+ // Spin the threads for all workers, and update CPU placements.
3054
+ // Place the main thread last (towards the higher numbered CPU cores).
3055
+
3056
+ int32_t cpumask_iter = 0;
3057
+
3058
+ for (int j = 1; j < tpp->n_threads; j++) {
3059
+ ggml_thread_cpumask_next(tpp->cpumask, workers[j].cpumask, tpp->strict_cpu, &cpumask_iter);
3060
+
3061
+ int32_t rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_secondary_thread, &workers[j]);
3062
+ GGML_ASSERT(rc == 0);
3063
+ }
3064
+
3065
+ ggml_thread_cpumask_next(tpp->cpumask, workers[0].cpumask, tpp->strict_cpu, &cpumask_iter);
3066
+
3067
+ if (!threadpool->pause) {
3068
+ // Update main thread prio and affinity at the start, otherwise we'll do it in resume
3069
+ ggml_thread_apply_priority(threadpool->prio);
3070
+ if (ggml_thread_cpumask_is_valid(threadpool->workers[0].cpumask)) {
3071
+ ggml_thread_apply_affinity(threadpool->workers[0].cpumask);
3072
+ }
3073
+ }
3074
+ #endif // GGML_USE_OPENMP
3075
+
3076
+ return threadpool;
3077
+ }
3078
+
3079
+ struct ggml_threadpool * ggml_threadpool_new(struct ggml_threadpool_params * tpp) {
3080
+ return ggml_threadpool_new_impl(tpp, NULL, NULL);
3081
+ }
3082
+
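+ // [editor's sketch, not part of this diff] A persistent threadpool lets many
+ // graph computations reuse the same workers instead of the disposable pool
+ // that ggml_graph_compute creates when cplan->threadpool is NULL:
+ #include <stdlib.h>
+ #include "ggml.h"
+ #include "ggml-cpu.h"
+ static void compute_with_pool(struct ggml_cgraph * gf) {
+     struct ggml_threadpool_params tpp = ggml_threadpool_params_default(4);
+     struct ggml_threadpool * tp = ggml_threadpool_new(&tpp);
+     struct ggml_cplan plan = ggml_graph_plan(gf, 4, tp);
+     plan.work_data = malloc(plan.work_size);
+     ggml_graph_compute(gf, &plan);              // workers stay alive afterwards
+     ggml_graph_compute(gf, &plan);              // ... so a second run is cheap
+     free(plan.work_data);
+     ggml_threadpool_free(tp);                   // joins and releases the workers
+ }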
3083
+ enum ggml_status ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) {
3084
+ ggml_cpu_init();
3085
+
3086
+ GGML_ASSERT(cplan);
3087
+ GGML_ASSERT(cplan->n_threads > 0);
3088
+ GGML_ASSERT(cplan->work_size == 0 || cplan->work_data != NULL);
3089
+
3090
+ int n_threads = cplan->n_threads;
3091
+ struct ggml_threadpool * threadpool = cplan->threadpool;
3092
+
3093
+ bool disposable_threadpool = false;
3094
+
3095
+ if (threadpool == NULL) {
3096
+ //GGML_PRINT_DEBUG("Threadpool is not specified. Will create a disposable threadpool : n_threads %d\n", n_threads);
3097
+ disposable_threadpool = true;
3098
+
3099
+ struct ggml_threadpool_params ttp = ggml_threadpool_params_default(n_threads);
3100
+ threadpool = ggml_threadpool_new_impl(&ttp, cgraph, cplan);
3101
+ } else {
3102
+ // Reset the parameters that change per graph computation
3103
+ // No worker threads should be accessing the parameters below at this stage
3104
+ threadpool->cgraph = cgraph;
3105
+ threadpool->cplan = cplan;
3106
+ threadpool->current_chunk = 0;
3107
+ threadpool->abort = -1;
3108
+ threadpool->ec = GGML_STATUS_SUCCESS;
3109
+ }
3110
+
3111
+ #ifdef GGML_USE_OPENMP
3112
+ if (n_threads > 1) {
3113
+ #pragma omp parallel num_threads(n_threads)
3114
+ {
3115
+ #pragma omp single
3116
+ {
3117
+ // update the number of threads to the actual count reported by OpenMP
3118
+ n_threads = omp_get_num_threads();
3119
+ atomic_store_explicit(&threadpool->n_threads_cur, n_threads, memory_order_relaxed);
3120
+ }
3121
+
3122
+ ggml_graph_compute_thread(&threadpool->workers[omp_get_thread_num()]);
3123
+ }
3124
+ } else {
3125
+ atomic_store_explicit(&threadpool->n_threads_cur, 1, memory_order_relaxed);
3126
+ ggml_graph_compute_thread(&threadpool->workers[0]);
3127
+ }
3128
+ #else
3129
+ if (n_threads > threadpool->n_threads_max) {
3130
+ GGML_LOG_WARN("cplan requested more threads (%d) than available (%d)\n", n_threads, threadpool->n_threads_max);
3131
+ n_threads = threadpool->n_threads_max;
3132
+ }
3133
+
3134
+ // Kick all threads to start the new graph
3135
+ ggml_graph_compute_kickoff(threadpool, n_threads);
3136
+
3137
+ // The main thread acts as a worker thread too
3138
+ ggml_graph_compute_thread(&threadpool->workers[0]);
3139
+ #endif
3140
+
3141
+ // don't leave affinity set on the main thread
3142
+ clear_numa_thread_affinity();
3143
+
3144
+ enum ggml_status ret = threadpool->ec;
3145
+
3146
+ if (disposable_threadpool) {
3147
+ ggml_threadpool_free(threadpool);
3148
+ }
3149
+
3150
+ return ret;
3151
+ }
3152
+
3153
+ enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) {
3154
+ struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads, NULL);
3155
+
3156
+ cplan.work_data = (uint8_t *)ggml_new_buffer(ctx, cplan.work_size);
3157
+
3158
+ return ggml_graph_compute(cgraph, &cplan);
3159
+ }
3160
+
3161
+ void ggml_cpu_fp32_to_fp16(const float * x, ggml_fp16_t * y, int64_t n) {
3162
+ int64_t i = 0;
3163
+ #if defined(__F16C__)
3164
+ #if defined(__AVX512F__)
3165
+ for (; i + 15 < n; i += 16) {
3166
+ __m512 x_vec = _mm512_loadu_ps(x + i);
3167
+ __m256i y_vec = _mm512_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
3168
+ _mm256_storeu_si256((__m256i *)(y + i), y_vec);
3169
+ }
3170
+ #endif
3171
+ for (; i + 7 < n; i += 8) {
3172
+ __m256 x_vec = _mm256_loadu_ps(x + i);
3173
+ __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
3174
+ _mm_storeu_si128((__m128i *)(y + i), y_vec);
3175
+ }
3176
+ for (; i + 3 < n; i += 4) {
3177
+ __m128 x_vec = _mm_loadu_ps(x + i);
3178
+ __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
3179
+ _mm_storel_epi64((__m128i *)(y + i), y_vec);
3180
+ }
3181
+ #endif
3182
+ for (; i < n; ++i) {
3183
+ y[i] = GGML_FP32_TO_FP16(x[i]);
3184
+ }
3185
+ }
3186
+
3187
+ void ggml_cpu_fp16_to_fp32(const ggml_fp16_t * x, float * y, int64_t n) {
3188
+ int64_t i = 0;
3189
+ #if defined(__F16C__)
3190
+ #if defined(__AVX512F__)
3191
+ for (; i + 15 < n; i += 16) {
3192
+ __m256i x_vec = _mm256_loadu_si256((const __m256i *)(x + i));
3193
+ __m512 y_vec = _mm512_cvtph_ps(x_vec);
3194
+ _mm512_storeu_ps(y + i, y_vec);
3195
+ }
3196
+ #endif
3197
+ for (; i + 7 < n; i += 8) {
3198
+ __m128i x_vec = _mm_loadu_si128((const __m128i *)(x + i));
3199
+ __m256 y_vec = _mm256_cvtph_ps(x_vec);
3200
+ _mm256_storeu_ps(y + i, y_vec);
3201
+ }
3202
+ for (; i + 3 < n; i += 4) {
3203
+ __m128i x_vec = _mm_loadl_epi64((const __m128i *)(x + i));
3204
+ __m128 y_vec = _mm_cvtph_ps(x_vec);
3205
+ _mm_storeu_ps(y + i, y_vec);
3206
+ }
3207
+ #endif
3208
+ for (; i < n; ++i) {
3209
+ y[i] = GGML_FP16_TO_FP32(x[i]);
3210
+ }
3211
+ }
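+ // [editor's sketch, not part of this diff; assumes these converters are
+ // exported via ggml-cpu.h] Round-tripping a small buffer through the two
+ // functions above; the chosen values are exactly representable in FP16,
+ // so the round trip is lossless:
+ #include <stdio.h>
+ #include "ggml.h"
+ #include "ggml-cpu.h"
+ int main(void) {
+     const float src[4] = { 0.0f, 1.0f, -2.5f, 65504.0f }; // 65504 = max finite fp16
+     ggml_fp16_t half[4];
+     float       back[4];
+     ggml_cpu_fp32_to_fp16(src, half, 4);
+     ggml_cpu_fp16_to_fp32(half, back, 4);
+     for (int i = 0; i < 4; i++) {
+         printf("%g -> %g\n", src[i], back[i]);
+     }
+     return 0;
+ }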
3212
+
3213
+ void ggml_cpu_fp32_to_bf16(const float * x, ggml_bf16_t * y, int64_t n) {
3214
+ int64_t i = 0;
3215
+ for (; i < n; ++i) {
3216
+ y[i] = GGML_FP32_TO_BF16(x[i]);
3217
+ }
3218
+ }
3219
+
3220
+ void ggml_cpu_bf16_to_fp32(const ggml_bf16_t * x, float * y, int64_t n) {
3221
+ int64_t i = 0;
3222
+ #if defined(__AVX2__)
3223
+ #if defined(__AVX512F__)
3224
+ for (; i + 15 < n; i += 16) {
3225
+ _mm512_storeu_ps(y + i,
3226
+ _mm512_castsi512_ps(
3227
+ _mm512_slli_epi32(
3228
+ _mm512_cvtepu16_epi32(
3229
+ _mm256_loadu_si256(
3230
+ (const __m256i *)(x + i))),
3231
+ 16)));
3232
+ }
3233
+ #endif
3234
+ for (; i + 7 < n; i += 8) {
3235
+ _mm256_storeu_ps(y + i,
3236
+ _mm256_castsi256_ps(
3237
+ _mm256_slli_epi32(
3238
+ _mm256_cvtepu16_epi32(
3239
+ _mm_loadu_si128(
3240
+ (const __m128i *)(x + i))),
3241
+ 16)));
3242
+ }
3243
+ #endif
3244
+ for (; i < n; i++) {
3245
+ y[i] = GGML_BF16_TO_FP32(x[i]);
3246
+ }
3247
+ }
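+ // [editor's sketch, not part of this diff] BF16 is simply the top 16 bits of
+ // an IEEE-754 float, so decoding is a 16-bit left shift -- which is why the
+ // AVX path above only needs a widen (cvtepu16), a shift (slli) and a bit-cast:
+ #include <stdint.h>
+ #include <stdio.h>
+ #include <string.h>
+ int main(void) {
+     const uint16_t bf16 = 0x3F80;               // bf16 bit pattern of 1.0f
+     const uint32_t bits = (uint32_t) bf16 << 16;
+     float f;
+     memcpy(&f, &bits, sizeof(f));               // bit-cast without aliasing UB
+     printf("%f\n", f);                          // prints 1.000000
+     return 0;
+ }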
3248
+
3249
+ int ggml_cpu_has_avx(void) {
3250
+ #if defined(__AVX__)
3251
+ return 1;
3252
+ #else
3253
+ return 0;
3254
+ #endif
3255
+ }
3256
+
3257
+ int ggml_cpu_has_avx_vnni(void) {
3258
+ #if defined(__AVXVNNI__)
3259
+ return 1;
3260
+ #else
3261
+ return 0;
3262
+ #endif
3263
+ }
3264
+
3265
+ int ggml_cpu_has_avx2(void) {
3266
+ #if defined(__AVX2__)
3267
+ return 1;
3268
+ #else
3269
+ return 0;
3270
+ #endif
3271
+ }
3272
+
3273
+ int ggml_cpu_has_avx512(void) {
3274
+ #if defined(__AVX512F__)
3275
+ return 1;
3276
+ #else
3277
+ return 0;
3278
+ #endif
3279
+ }
3280
+
3281
+ int ggml_cpu_has_avx512_vbmi(void) {
3282
+ #if defined(__AVX512VBMI__)
3283
+ return 1;
3284
+ #else
3285
+ return 0;
3286
+ #endif
3287
+ }
3288
+
3289
+ int ggml_cpu_has_avx512_vnni(void) {
3290
+ #if defined(__AVX512VNNI__)
3291
+ return 1;
3292
+ #else
3293
+ return 0;
3294
+ #endif
3295
+ }
3296
+
3297
+ int ggml_cpu_has_avx512_bf16(void) {
3298
+ #if defined(__AVX512BF16__)
3299
+ return 1;
3300
+ #else
3301
+ return 0;
3302
+ #endif
3303
+ }
3304
+
3305
+ int ggml_cpu_has_amx_int8(void) {
3306
+ #if defined(__AMX_INT8__)
3307
+ return 1;
3308
+ #else
3309
+ return 0;
3310
+ #endif
3311
+ }
3312
+
3313
+ int ggml_cpu_has_bmi2(void) {
3314
+ #if defined(__BMI2__)
3315
+ return 1;
3316
+ #else
3317
+ return 0;
3318
+ #endif
3319
+ }
3320
+
3321
+ int ggml_cpu_has_fma(void) {
3322
+ #if defined(__FMA__)
3323
+ return 1;
3324
+ #else
3325
+ return 0;
3326
+ #endif
3327
+ }
3328
+
3329
+ int ggml_cpu_has_arm_fma(void) {
3330
+ #if defined(__ARM_FEATURE_FMA)
3331
+ return 1;
3332
+ #else
3333
+ return 0;
3334
+ #endif
3335
+ }
3336
+
3337
+ int ggml_cpu_has_riscv_v(void) {
3338
+ #if defined(__riscv_v_intrinsic)
3339
+ return 1;
3340
+ #else
3341
+ return 0;
3342
+ #endif
3343
+ }
3344
+
3345
+ int ggml_cpu_has_f16c(void) {
3346
+ #if defined(__F16C__)
3347
+ return 1;
3348
+ #else
3349
+ return 0;
3350
+ #endif
3351
+ }
3352
+
3353
+ int ggml_cpu_has_fp16_va(void) {
3354
+ #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
3355
+ return 1;
3356
+ #else
3357
+ return 0;
3358
+ #endif
3359
+ }
3360
+
3361
+ int ggml_cpu_has_wasm_simd(void) {
3362
+ #if defined(__wasm_simd128__)
3363
+ return 1;
3364
+ #else
3365
+ return 0;
3366
+ #endif
3367
+ }
3368
+
3369
+ int ggml_cpu_has_llamafile(void) {
3370
+ #if defined(GGML_USE_LLAMAFILE)
3371
+ return 1;
3372
+ #else
3373
+ return 0;
3374
+ #endif
3375
+ }
3376
+
3377
+ int ggml_cpu_has_sse3(void) {
3378
+ #if defined(__SSE3__)
3379
+ return 1;
3380
+ #else
3381
+ return 0;
3382
+ #endif
3383
+ }
3384
+
3385
+ int ggml_cpu_has_ssse3(void) {
3386
+ #if defined(__SSSE3__)
3387
+ return 1;
3388
+ #else
3389
+ return 0;
3390
+ #endif
3391
+ }
3392
+
3393
+ int ggml_cpu_has_vsx(void) {
3394
+ #if defined(__POWER9_VECTOR__)
3395
+ return 1;
3396
+ #else
3397
+ return 0;
3398
+ #endif
3399
+ }
3400
+
3401
+ int ggml_cpu_has_vxe(void) {
3402
+ #if defined(__VXE__) || defined(__VXE2__)
3403
+ return 1;
3404
+ #else
3405
+ return 0;
3406
+ #endif
3407
+ }
3408
+
3409
+ int ggml_cpu_has_neon(void) {
3410
+ #if defined(__ARM_ARCH) && defined(__ARM_NEON)
3411
+ return ggml_arm_arch_features.has_neon;
3412
+ #else
3413
+ return 0;
3414
+ #endif
3415
+ }
3416
+
3417
+ int ggml_cpu_has_dotprod(void) {
3418
+ #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_DOTPROD)
3419
+ return ggml_arm_arch_features.has_dotprod;
3420
+ #else
3421
+ return 0;
3422
+ #endif
3423
+ }
3424
+
3425
+ int ggml_cpu_has_sve(void) {
3426
+ #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE)
3427
+ return ggml_arm_arch_features.has_sve;
3428
+ #else
3429
+ return 0;
3430
+ #endif
3431
+ }
3432
+
3433
+ int ggml_cpu_has_matmul_int8(void) {
3434
+ #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_MATMUL_INT8)
3435
+ return ggml_arm_arch_features.has_i8mm;
3436
+ #else
3437
+ return 0;
3438
+ #endif
3439
+ }
3440
+
3441
+ int ggml_cpu_get_sve_cnt(void) {
3442
+ #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE)
3443
+ return ggml_arm_arch_features.sve_cnt;
3444
+ #else
3445
+ return 0;
3446
+ #endif
3447
+ }
3448
+
3449
+ int ggml_cpu_has_sme(void) {
3450
+ #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SME)
3451
+ return ggml_arm_arch_features.has_sme;
3452
+ #else
3453
+ return 0;
3454
+ #endif
3455
+ }
3456
+
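+ // [editor's sketch, not part of this diff; assumes the ggml-cpu.h prototypes]
+ // The predicates above return plain ints, so a runtime capability dump is just:
+ #include <stdio.h>
+ #include "ggml-cpu.h"
+ int main(void) {
+     printf("avx2    : %d\n", ggml_cpu_has_avx2());
+     printf("avx512  : %d\n", ggml_cpu_has_avx512());
+     printf("f16c    : %d\n", ggml_cpu_has_f16c());
+     printf("neon    : %d\n", ggml_cpu_has_neon());
+     printf("sve cnt : %d\n", ggml_cpu_get_sve_cnt());
+     return 0;
+ }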
3457
+ void ggml_cpu_init(void) {
3458
+ // needed to initialize f16 tables
3459
+ {
3460
+ struct ggml_init_params params = { 0, NULL, false };
3461
+ struct ggml_context * ctx = ggml_init(params);
3462
+ ggml_free(ctx);
3463
+ }
3464
+
3465
+ ggml_critical_section_start();
3466
+
3467
+ static bool is_first_call = true;
3468
+
3469
+ if (is_first_call) {
3470
+ // initialize GELU, Quick GELU, SILU and EXP F32 tables
3471
+ {
3472
+ const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
3473
+
3474
+ for (int i = 0; i < (1 << 16); ++i) {
3475
+ union {
3476
+ uint16_t u16;
3477
+ ggml_fp16_t fp16;
3478
+ } u = {i};
3479
+ float f = GGML_FP16_TO_FP32(u.fp16);
3480
+ ggml_table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
3481
+ ggml_table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
3482
+ }
3483
+
3484
+ const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
3485
+
3486
+ GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0);
3487
+
3488
+ #ifdef GGML_USE_OPENMP
3489
+ //if (!getenv("OMP_WAIT_POLICY")) {
3490
+ // // set the wait policy to active, so that OpenMP threads don't sleep
3491
+ // putenv("OMP_WAIT_POLICY=active");
3492
+ //}
3493
+
3494
+ if (!getenv("KMP_BLOCKTIME")) {
3495
+ // set the time to wait before sleeping a thread
3496
+ // this is less aggressive than setting the wait policy to active, but should achieve similar results in most cases
3497
+ putenv("KMP_BLOCKTIME=200"); // 200ms
3498
+ }
3499
+ #endif
3500
+ }
3501
+
3502
+ #if defined(__ARM_ARCH)
3503
+ ggml_init_arm_arch_features();
3504
+ #endif
3505
+
3506
+ is_first_call = false;
3507
+ }
3508
+
3509
+ ggml_critical_section_end();
3510
+ }