whispercpp 1.3.1 → 1.3.3

This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only.
Files changed (857)
  1. checksums.yaml +4 -4
  2. data/.gitignore +7 -3
  3. data/README.md +161 -43
  4. data/Rakefile +45 -13
  5. data/ext/.gitignore +4 -8
  6. data/ext/dependencies.rb +73 -0
  7. data/ext/extconf.rb +21 -198
  8. data/ext/options.rb +85 -0
  9. data/ext/ruby_whisper.c +177 -0
  10. data/ext/ruby_whisper.h +17 -2
  11. data/ext/ruby_whisper_context.c +672 -0
  12. data/ext/ruby_whisper_error.c +52 -0
  13. data/ext/ruby_whisper_model.c +232 -0
  14. data/ext/ruby_whisper_params.c +1303 -0
  15. data/ext/ruby_whisper_segment.c +220 -0
  16. data/ext/ruby_whisper_transcribe.cpp +93 -0
  17. data/ext/ruby_whisper_vad_params.c +288 -0
  18. data/ext/sources/CMakeGraphVizOptions.cmake +8 -0
  19. data/ext/sources/CMakeLists.txt +255 -0
  20. data/ext/sources/bindings/javascript/CMakeLists.txt +41 -0
  21. data/ext/sources/bindings/javascript/emscripten.cpp +93 -0
  22. data/ext/sources/bindings/javascript/libwhisper.worker.js +1 -0
  23. data/ext/sources/bindings/javascript/package-tmpl.json +26 -0
  24. data/ext/sources/bindings/javascript/package.json +26 -0
  25. data/ext/sources/bindings/javascript/whisper.js +19 -0
  26. data/ext/sources/build-xcframework.sh +547 -0
  27. data/ext/sources/cmake/DefaultTargetOptions.cmake +16 -0
  28. data/ext/sources/cmake/FindFFmpeg.cmake +163 -0
  29. data/ext/sources/cmake/build-info.cmake +60 -0
  30. data/ext/sources/cmake/git-vars.cmake +22 -0
  31. data/ext/sources/cmake/whisper-config.cmake.in +65 -0
  32. data/ext/sources/cmake/whisper.pc.in +10 -0
  33. data/ext/sources/examples/CMakeLists.txt +124 -0
  34. data/ext/sources/examples/addon.node/CMakeLists.txt +31 -0
  35. data/ext/sources/examples/addon.node/__test__/whisper.spec.js +133 -0
  36. data/ext/sources/examples/addon.node/addon.cpp +557 -0
  37. data/ext/sources/examples/addon.node/index.js +57 -0
  38. data/ext/sources/examples/addon.node/package.json +16 -0
  39. data/ext/sources/examples/addon.node/vad-example.js +132 -0
  40. data/ext/sources/examples/bench/CMakeLists.txt +8 -0
  41. data/ext/sources/examples/bench/bench.cpp +176 -0
  42. data/ext/sources/examples/bench.wasm/CMakeLists.txt +49 -0
  43. data/ext/sources/examples/bench.wasm/emscripten.cpp +87 -0
  44. data/ext/sources/examples/bench.wasm/index-tmpl.html +284 -0
  45. data/ext/sources/examples/cli/CMakeLists.txt +8 -0
  46. data/ext/sources/examples/cli/cli.cpp +1295 -0
  47. data/ext/sources/examples/coi-serviceworker.js +146 -0
  48. data/ext/sources/examples/command/CMakeLists.txt +10 -0
  49. data/ext/sources/examples/command/command.cpp +800 -0
  50. data/ext/sources/examples/command/commands.txt +9 -0
  51. data/ext/sources/examples/command.wasm/CMakeLists.txt +50 -0
  52. data/ext/sources/examples/command.wasm/emscripten.cpp +327 -0
  53. data/ext/sources/examples/command.wasm/index-tmpl.html +414 -0
  54. data/ext/sources/examples/common-ggml.cpp +238 -0
  55. data/ext/sources/examples/common-ggml.h +18 -0
  56. data/ext/sources/examples/common-sdl.cpp +227 -0
  57. data/ext/sources/examples/common-sdl.h +49 -0
  58. data/ext/sources/examples/common-whisper.cpp +175 -0
  59. data/ext/sources/examples/common-whisper.h +24 -0
  60. data/ext/sources/examples/common.cpp +675 -0
  61. data/ext/sources/examples/common.h +322 -0
  62. data/ext/sources/examples/deprecation-warning/CMakeLists.txt +6 -0
  63. data/ext/sources/examples/deprecation-warning/deprecation-warning.cpp +38 -0
  64. data/ext/sources/examples/ffmpeg-transcode.cpp +368 -0
  65. data/ext/sources/examples/generate-karaoke.sh +57 -0
  66. data/ext/sources/examples/grammar-parser.cpp +423 -0
  67. data/ext/sources/examples/grammar-parser.h +29 -0
  68. data/ext/sources/examples/helpers.js +191 -0
  69. data/ext/sources/examples/json.hpp +24596 -0
  70. data/ext/sources/examples/livestream.sh +112 -0
  71. data/ext/sources/examples/lsp/CMakeLists.txt +9 -0
  72. data/ext/sources/examples/lsp/lsp.cpp +469 -0
  73. data/ext/sources/examples/lsp/whisper.vim +362 -0
  74. data/ext/sources/examples/miniaudio.h +93468 -0
  75. data/ext/sources/examples/python/test_whisper_processor.py +7 -0
  76. data/ext/sources/examples/python/whisper_processor.py +54 -0
  77. data/ext/sources/examples/quantize/CMakeLists.txt +6 -0
  78. data/ext/sources/examples/quantize/quantize.cpp +226 -0
  79. data/ext/sources/examples/server/CMakeLists.txt +15 -0
  80. data/ext/sources/examples/server/bench.js +29 -0
  81. data/ext/sources/examples/server/httplib.h +10497 -0
  82. data/ext/sources/examples/server/server.cpp +1238 -0
  83. data/ext/sources/examples/server.py +115 -0
  84. data/ext/sources/examples/stb_vorbis.c +5584 -0
  85. data/ext/sources/examples/stream/CMakeLists.txt +10 -0
  86. data/ext/sources/examples/stream/stream.cpp +435 -0
  87. data/ext/sources/examples/stream.wasm/CMakeLists.txt +49 -0
  88. data/ext/sources/examples/stream.wasm/emscripten.cpp +216 -0
  89. data/ext/sources/examples/stream.wasm/index-tmpl.html +414 -0
  90. data/ext/sources/examples/sycl/CMakeLists.txt +9 -0
  91. data/ext/sources/examples/sycl/build.sh +22 -0
  92. data/ext/sources/examples/sycl/ls-sycl-device.cpp +11 -0
  93. data/ext/sources/examples/sycl/run-whisper.sh +17 -0
  94. data/ext/sources/examples/talk-llama/CMakeLists.txt +43 -0
  95. data/ext/sources/examples/talk-llama/eleven-labs.py +80 -0
  96. data/ext/sources/examples/talk-llama/llama-adapter.cpp +388 -0
  97. data/ext/sources/examples/talk-llama/llama-adapter.h +76 -0
  98. data/ext/sources/examples/talk-llama/llama-arch.cpp +1914 -0
  99. data/ext/sources/examples/talk-llama/llama-arch.h +464 -0
  100. data/ext/sources/examples/talk-llama/llama-batch.cpp +843 -0
  101. data/ext/sources/examples/talk-llama/llama-batch.h +147 -0
  102. data/ext/sources/examples/talk-llama/llama-chat.cpp +685 -0
  103. data/ext/sources/examples/talk-llama/llama-chat.h +59 -0
  104. data/ext/sources/examples/talk-llama/llama-context.cpp +2845 -0
  105. data/ext/sources/examples/talk-llama/llama-context.h +297 -0
  106. data/ext/sources/examples/talk-llama/llama-cparams.cpp +5 -0
  107. data/ext/sources/examples/talk-llama/llama-cparams.h +41 -0
  108. data/ext/sources/examples/talk-llama/llama-grammar.cpp +1229 -0
  109. data/ext/sources/examples/talk-llama/llama-grammar.h +173 -0
  110. data/ext/sources/examples/talk-llama/llama-graph.cpp +1693 -0
  111. data/ext/sources/examples/talk-llama/llama-graph.h +710 -0
  112. data/ext/sources/examples/talk-llama/llama-hparams.cpp +103 -0
  113. data/ext/sources/examples/talk-llama/llama-hparams.h +207 -0
  114. data/ext/sources/examples/talk-llama/llama-impl.cpp +167 -0
  115. data/ext/sources/examples/talk-llama/llama-impl.h +61 -0
  116. data/ext/sources/examples/talk-llama/llama-io.cpp +15 -0
  117. data/ext/sources/examples/talk-llama/llama-io.h +35 -0
  118. data/ext/sources/examples/talk-llama/llama-kv-cache-unified-iswa.cpp +279 -0
  119. data/ext/sources/examples/talk-llama/llama-kv-cache-unified-iswa.h +128 -0
  120. data/ext/sources/examples/talk-llama/llama-kv-cache-unified.cpp +1841 -0
  121. data/ext/sources/examples/talk-llama/llama-kv-cache-unified.h +303 -0
  122. data/ext/sources/examples/talk-llama/llama-kv-cache.h +44 -0
  123. data/ext/sources/examples/talk-llama/llama-kv-cells.h +439 -0
  124. data/ext/sources/examples/talk-llama/llama-memory-hybrid.cpp +246 -0
  125. data/ext/sources/examples/talk-llama/llama-memory-hybrid.h +138 -0
  126. data/ext/sources/examples/talk-llama/llama-memory-recurrent.cpp +1125 -0
  127. data/ext/sources/examples/talk-llama/llama-memory-recurrent.h +183 -0
  128. data/ext/sources/examples/talk-llama/llama-memory.cpp +59 -0
  129. data/ext/sources/examples/talk-llama/llama-memory.h +116 -0
  130. data/ext/sources/examples/talk-llama/llama-mmap.cpp +600 -0
  131. data/ext/sources/examples/talk-llama/llama-mmap.h +68 -0
  132. data/ext/sources/examples/talk-llama/llama-model-loader.cpp +1163 -0
  133. data/ext/sources/examples/talk-llama/llama-model-loader.h +169 -0
  134. data/ext/sources/examples/talk-llama/llama-model-saver.cpp +282 -0
  135. data/ext/sources/examples/talk-llama/llama-model-saver.h +37 -0
  136. data/ext/sources/examples/talk-llama/llama-model.cpp +15114 -0
  137. data/ext/sources/examples/talk-llama/llama-model.h +452 -0
  138. data/ext/sources/examples/talk-llama/llama-quant.cpp +1049 -0
  139. data/ext/sources/examples/talk-llama/llama-quant.h +1 -0
  140. data/ext/sources/examples/talk-llama/llama-sampling.cpp +2575 -0
  141. data/ext/sources/examples/talk-llama/llama-sampling.h +32 -0
  142. data/ext/sources/examples/talk-llama/llama-vocab.cpp +3377 -0
  143. data/ext/sources/examples/talk-llama/llama-vocab.h +132 -0
  144. data/ext/sources/examples/talk-llama/llama.cpp +358 -0
  145. data/ext/sources/examples/talk-llama/llama.h +1484 -0
  146. data/ext/sources/examples/talk-llama/prompts/talk-alpaca.txt +23 -0
  147. data/ext/sources/examples/talk-llama/speak +40 -0
  148. data/ext/sources/examples/talk-llama/speak.bat +1 -0
  149. data/ext/sources/examples/talk-llama/speak.ps1 +14 -0
  150. data/ext/sources/examples/talk-llama/talk-llama.cpp +810 -0
  151. data/ext/sources/examples/talk-llama/unicode-data.cpp +7034 -0
  152. data/ext/sources/examples/talk-llama/unicode-data.h +20 -0
  153. data/ext/sources/examples/talk-llama/unicode.cpp +854 -0
  154. data/ext/sources/examples/talk-llama/unicode.h +66 -0
  155. data/ext/sources/examples/vad-speech-segments/CMakeLists.txt +8 -0
  156. data/ext/sources/examples/vad-speech-segments/speech.cpp +149 -0
  157. data/ext/sources/examples/wchess/CMakeLists.txt +10 -0
  158. data/ext/sources/examples/wchess/libwchess/CMakeLists.txt +19 -0
  159. data/ext/sources/examples/wchess/libwchess/Chessboard.cpp +803 -0
  160. data/ext/sources/examples/wchess/libwchess/Chessboard.h +33 -0
  161. data/ext/sources/examples/wchess/libwchess/WChess.cpp +193 -0
  162. data/ext/sources/examples/wchess/libwchess/WChess.h +63 -0
  163. data/ext/sources/examples/wchess/libwchess/test-chessboard.cpp +117 -0
  164. data/ext/sources/examples/wchess/wchess.cmd/CMakeLists.txt +8 -0
  165. data/ext/sources/examples/wchess/wchess.cmd/wchess.cmd.cpp +251 -0
  166. data/ext/sources/examples/whisper.wasm/CMakeLists.txt +50 -0
  167. data/ext/sources/examples/whisper.wasm/emscripten.cpp +118 -0
  168. data/ext/sources/examples/whisper.wasm/index-tmpl.html +658 -0
  169. data/ext/sources/ggml/CMakeLists.txt +435 -0
  170. data/ext/sources/ggml/cmake/BuildTypes.cmake +54 -0
  171. data/ext/sources/ggml/cmake/GitVars.cmake +22 -0
  172. data/ext/sources/ggml/cmake/common.cmake +50 -0
  173. data/ext/sources/ggml/cmake/ggml-config.cmake.in +152 -0
  174. data/ext/{ggml → sources/ggml}/include/ggml-alloc.h +1 -1
  175. data/ext/{ggml → sources/ggml}/include/ggml-backend.h +10 -8
  176. data/ext/{ggml → sources/ggml}/include/ggml-cpp.h +2 -1
  177. data/ext/{ggml → sources/ggml}/include/ggml-cpu.h +11 -1
  178. data/ext/{ggml → sources/ggml}/include/ggml-metal.h +1 -1
  179. data/ext/{ggml → sources/ggml}/include/ggml-opt.h +49 -28
  180. data/ext/{ggml → sources/ggml}/include/ggml-rpc.h +6 -1
  181. data/ext/{ggml → sources/ggml}/include/ggml-vulkan.h +0 -2
  182. data/ext/{ggml → sources/ggml}/include/ggml.h +325 -269
  183. data/ext/sources/ggml/include/gguf.h +202 -0
  184. data/ext/sources/ggml/src/CMakeLists.txt +404 -0
  185. data/ext/{ggml → sources/ggml}/src/ggml-alloc.c +34 -29
  186. data/ext/sources/ggml/src/ggml-amx/CMakeLists.txt +107 -0
  187. data/ext/{ggml → sources/ggml}/src/ggml-backend-impl.h +1 -2
  188. data/ext/{ggml → sources/ggml}/src/ggml-backend-reg.cpp +92 -53
  189. data/ext/{ggml → sources/ggml}/src/ggml-backend.cpp +69 -34
  190. data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +87 -0
  191. data/ext/sources/ggml/src/ggml-cann/CMakeLists.txt +75 -0
  192. data/ext/sources/ggml/src/ggml-cann/Doxyfile +2579 -0
  193. data/ext/{ggml → sources/ggml}/src/ggml-cann/acl_tensor.cpp +10 -4
  194. data/ext/{ggml → sources/ggml}/src/ggml-cann/acl_tensor.h +5 -5
  195. data/ext/{ggml → sources/ggml}/src/ggml-cann/aclnn_ops.cpp +1272 -1506
  196. data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +1125 -0
  197. data/ext/{ggml → sources/ggml}/src/ggml-cann/common.h +140 -1
  198. data/ext/{ggml → sources/ggml}/src/ggml-cann/ggml-cann.cpp +588 -146
  199. data/ext/sources/ggml/src/ggml-cann/kernels/CMakeLists.txt +30 -0
  200. data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/dup.cpp +3 -5
  201. data/ext/{ggml → sources/ggml}/src/ggml-common.h +16 -8
  202. data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +597 -0
  203. data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/amx.cpp +3 -2
  204. data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/mmq.cpp +11 -10
  205. data/ext/sources/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
  206. data/ext/sources/ggml/src/ggml-cpu/arch/arm/quants.c +4114 -0
  207. data/ext/sources/ggml/src/ggml-cpu/arch/arm/repack.cpp +2163 -0
  208. data/ext/sources/ggml/src/ggml-cpu/arch/loongarch/quants.c +2639 -0
  209. data/ext/sources/ggml/src/ggml-cpu/arch/powerpc/cpu-feats.cpp +82 -0
  210. data/ext/sources/ggml/src/ggml-cpu/arch/powerpc/quants.c +2732 -0
  211. data/ext/sources/ggml/src/ggml-cpu/arch/riscv/quants.c +2069 -0
  212. data/ext/sources/ggml/src/ggml-cpu/arch/riscv/repack.cpp +397 -0
  213. data/ext/sources/ggml/src/ggml-cpu/arch/s390/quants.c +1300 -0
  214. data/ext/sources/ggml/src/ggml-cpu/arch/wasm/quants.c +1481 -0
  215. data/ext/{ggml/src/ggml-cpu/cpu-feats-x86.cpp → sources/ggml/src/ggml-cpu/arch/x86/cpu-feats.cpp} +5 -1
  216. data/ext/sources/ggml/src/ggml-cpu/arch/x86/quants.c +4311 -0
  217. data/ext/sources/ggml/src/ggml-cpu/arch/x86/repack.cpp +3285 -0
  218. data/ext/sources/ggml/src/ggml-cpu/arch-fallback.h +184 -0
  219. data/ext/sources/ggml/src/ggml-cpu/binary-ops.cpp +158 -0
  220. data/ext/sources/ggml/src/ggml-cpu/binary-ops.h +16 -0
  221. data/ext/sources/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +100 -0
  222. data/ext/sources/ggml/src/ggml-cpu/common.h +73 -0
  223. data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-impl.h +172 -41
  224. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +3551 -0
  225. data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu.cpp +78 -25
  226. data/ext/{ggml/src/ggml-cpu/ggml-cpu-hbm.cpp → sources/ggml/src/ggml-cpu/hbm.cpp} +1 -1
  227. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +337 -0
  228. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.h +95 -0
  229. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +482 -0
  230. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.h +17 -0
  231. data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +3594 -0
  232. data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.h +19 -0
  233. data/ext/sources/ggml/src/ggml-cpu/ops.cpp +9786 -0
  234. data/ext/sources/ggml/src/ggml-cpu/ops.h +118 -0
  235. data/ext/sources/ggml/src/ggml-cpu/quants.c +1158 -0
  236. data/ext/{ggml/src/ggml-cpu/ggml-cpu-quants.h → sources/ggml/src/ggml-cpu/quants.h} +26 -0
  237. data/ext/sources/ggml/src/ggml-cpu/repack.cpp +1571 -0
  238. data/ext/sources/ggml/src/ggml-cpu/repack.h +98 -0
  239. data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +1184 -0
  240. data/ext/{ggml/src/ggml-cpu/ggml-cpu-traits.cpp → sources/ggml/src/ggml-cpu/traits.cpp} +1 -1
  241. data/ext/sources/ggml/src/ggml-cpu/unary-ops.cpp +186 -0
  242. data/ext/sources/ggml/src/ggml-cpu/unary-ops.h +28 -0
  243. data/ext/sources/ggml/src/ggml-cpu/vec.cpp +345 -0
  244. data/ext/sources/ggml/src/ggml-cpu/vec.h +1027 -0
  245. data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +184 -0
  246. data/ext/sources/ggml/src/ggml-cuda/acc.cu +61 -0
  247. data/ext/sources/ggml/src/ggml-cuda/acc.cuh +5 -0
  248. data/ext/sources/ggml/src/ggml-cuda/arange.cu +34 -0
  249. data/ext/sources/ggml/src/ggml-cuda/arange.cuh +5 -0
  250. data/ext/sources/ggml/src/ggml-cuda/argmax.cu +91 -0
  251. data/ext/sources/ggml/src/ggml-cuda/argmax.cuh +3 -0
  252. data/ext/sources/ggml/src/ggml-cuda/argsort.cu +104 -0
  253. data/ext/sources/ggml/src/ggml-cuda/argsort.cuh +3 -0
  254. data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +363 -0
  255. data/ext/sources/ggml/src/ggml-cuda/binbcast.cuh +9 -0
  256. data/ext/sources/ggml/src/ggml-cuda/clamp.cu +45 -0
  257. data/ext/sources/ggml/src/ggml-cuda/clamp.cuh +5 -0
  258. data/ext/sources/ggml/src/ggml-cuda/common.cuh +851 -0
  259. data/ext/sources/ggml/src/ggml-cuda/concat.cu +221 -0
  260. data/ext/sources/ggml/src/ggml-cuda/concat.cuh +5 -0
  261. data/ext/sources/ggml/src/ggml-cuda/conv-transpose-1d.cu +89 -0
  262. data/ext/sources/ggml/src/ggml-cuda/conv-transpose-1d.cuh +5 -0
  263. data/ext/sources/ggml/src/ggml-cuda/conv2d-dw.cu +161 -0
  264. data/ext/sources/ggml/src/ggml-cuda/conv2d-dw.cuh +5 -0
  265. data/ext/sources/ggml/src/ggml-cuda/conv2d-transpose.cu +91 -0
  266. data/ext/sources/ggml/src/ggml-cuda/conv2d-transpose.cuh +4 -0
  267. data/ext/sources/ggml/src/ggml-cuda/convert.cu +752 -0
  268. data/ext/sources/ggml/src/ggml-cuda/convert.cuh +31 -0
  269. data/ext/sources/ggml/src/ggml-cuda/count-equal.cu +64 -0
  270. data/ext/sources/ggml/src/ggml-cuda/count-equal.cuh +5 -0
  271. data/ext/sources/ggml/src/ggml-cuda/cp-async.cuh +57 -0
  272. data/ext/sources/ggml/src/ggml-cuda/cpy.cu +705 -0
  273. data/ext/sources/ggml/src/ggml-cuda/cpy.cuh +11 -0
  274. data/ext/sources/ggml/src/ggml-cuda/cross-entropy-loss.cu +189 -0
  275. data/ext/sources/ggml/src/ggml-cuda/cross-entropy-loss.cuh +7 -0
  276. data/ext/sources/ggml/src/ggml-cuda/dequantize.cuh +103 -0
  277. data/ext/sources/ggml/src/ggml-cuda/diagmask.cu +40 -0
  278. data/ext/sources/ggml/src/ggml-cuda/diagmask.cuh +5 -0
  279. data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +881 -0
  280. data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +1474 -0
  281. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cu +357 -0
  282. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cuh +3 -0
  283. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cu +365 -0
  284. data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cuh +3 -0
  285. data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f16.cuh +482 -0
  286. data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f32.cuh +472 -0
  287. data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +638 -0
  288. data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +3 -0
  289. data/ext/sources/ggml/src/ggml-cuda/fattn.cu +346 -0
  290. data/ext/sources/ggml/src/ggml-cuda/fattn.cuh +3 -0
  291. data/ext/sources/ggml/src/ggml-cuda/getrows.cu +275 -0
  292. data/ext/sources/ggml/src/ggml-cuda/getrows.cuh +15 -0
  293. data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +3647 -0
  294. data/ext/sources/ggml/src/ggml-cuda/gla.cu +93 -0
  295. data/ext/sources/ggml/src/ggml-cuda/gla.cuh +3 -0
  296. data/ext/sources/ggml/src/ggml-cuda/im2col.cu +103 -0
  297. data/ext/sources/ggml/src/ggml-cuda/im2col.cuh +5 -0
  298. data/ext/sources/ggml/src/ggml-cuda/mean.cu +19 -0
  299. data/ext/sources/ggml/src/ggml-cuda/mean.cuh +3 -0
  300. data/ext/sources/ggml/src/ggml-cuda/mma.cuh +396 -0
  301. data/ext/sources/ggml/src/ggml-cuda/mmq.cu +324 -0
  302. data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +3217 -0
  303. data/ext/sources/ggml/src/ggml-cuda/mmv.cu +506 -0
  304. data/ext/sources/ggml/src/ggml-cuda/mmv.cuh +11 -0
  305. data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +595 -0
  306. data/ext/sources/ggml/src/ggml-cuda/mmvq.cuh +12 -0
  307. data/ext/sources/ggml/src/ggml-cuda/norm.cu +458 -0
  308. data/ext/sources/ggml/src/ggml-cuda/norm.cuh +11 -0
  309. data/ext/sources/ggml/src/ggml-cuda/opt-step-adamw.cu +78 -0
  310. data/ext/sources/ggml/src/ggml-cuda/opt-step-adamw.cuh +5 -0
  311. data/ext/sources/ggml/src/ggml-cuda/out-prod.cu +68 -0
  312. data/ext/sources/ggml/src/ggml-cuda/out-prod.cuh +3 -0
  313. data/ext/sources/ggml/src/ggml-cuda/pad.cu +49 -0
  314. data/ext/sources/ggml/src/ggml-cuda/pad.cuh +5 -0
  315. data/ext/sources/ggml/src/ggml-cuda/pool2d.cu +94 -0
  316. data/ext/sources/ggml/src/ggml-cuda/pool2d.cuh +5 -0
  317. data/ext/sources/ggml/src/ggml-cuda/quantize.cu +190 -0
  318. data/ext/sources/ggml/src/ggml-cuda/quantize.cuh +27 -0
  319. data/ext/sources/ggml/src/ggml-cuda/rope.cu +456 -0
  320. data/ext/sources/ggml/src/ggml-cuda/rope.cuh +7 -0
  321. data/ext/sources/ggml/src/ggml-cuda/scale.cu +31 -0
  322. data/ext/sources/ggml/src/ggml-cuda/scale.cuh +5 -0
  323. data/ext/sources/ggml/src/ggml-cuda/softmax.cu +283 -0
  324. data/ext/sources/ggml/src/ggml-cuda/softmax.cuh +7 -0
  325. data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +148 -0
  326. data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cuh +3 -0
  327. data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cu +155 -0
  328. data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cuh +3 -0
  329. data/ext/sources/ggml/src/ggml-cuda/sum.cu +45 -0
  330. data/ext/sources/ggml/src/ggml-cuda/sum.cuh +5 -0
  331. data/ext/sources/ggml/src/ggml-cuda/sumrows.cu +26 -0
  332. data/ext/sources/ggml/src/ggml-cuda/sumrows.cuh +4 -0
  333. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_16.cu +5 -0
  334. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_8.cu +10 -0
  335. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_1.cu +10 -0
  336. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_2.cu +10 -0
  337. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_4.cu +10 -0
  338. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_16.cu +5 -0
  339. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_4.cu +10 -0
  340. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_8.cu +10 -0
  341. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_1.cu +10 -0
  342. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_2.cu +10 -0
  343. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_16.cu +5 -0
  344. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_2.cu +10 -0
  345. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_4.cu +10 -0
  346. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_8.cu +10 -0
  347. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_64-ncols2_1.cu +10 -0
  348. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_1.cu +10 -0
  349. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_2.cu +10 -0
  350. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_4.cu +10 -0
  351. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_8.cu +10 -0
  352. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu +5 -0
  353. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu +5 -0
  354. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu +5 -0
  355. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu +5 -0
  356. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu +5 -0
  357. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu +5 -0
  358. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu +5 -0
  359. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu +5 -0
  360. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu +5 -0
  361. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu +5 -0
  362. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu +5 -0
  363. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu +5 -0
  364. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu +5 -0
  365. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu +5 -0
  366. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu +5 -0
  367. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu +5 -0
  368. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu +5 -0
  369. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu +5 -0
  370. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu +5 -0
  371. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu +5 -0
  372. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu +5 -0
  373. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu +5 -0
  374. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu +5 -0
  375. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu +5 -0
  376. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu +5 -0
  377. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu +5 -0
  378. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu +5 -0
  379. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu +5 -0
  380. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu +5 -0
  381. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu +5 -0
  382. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu +5 -0
  383. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu +5 -0
  384. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu +5 -0
  385. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu +5 -0
  386. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu +5 -0
  387. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu +5 -0
  388. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu +5 -0
  389. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu +5 -0
  390. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu +5 -0
  391. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu +5 -0
  392. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu +5 -0
  393. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu +5 -0
  394. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu +5 -0
  395. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu +5 -0
  396. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu +5 -0
  397. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu +5 -0
  398. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu +5 -0
  399. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu +5 -0
  400. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu +5 -0
  401. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu +5 -0
  402. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu +5 -0
  403. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu +5 -0
  404. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu +5 -0
  405. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu +5 -0
  406. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu +5 -0
  407. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu +5 -0
  408. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu +5 -0
  409. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu +5 -0
  410. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu +5 -0
  411. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu +5 -0
  412. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu +5 -0
  413. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu +5 -0
  414. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu +5 -0
  415. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu +5 -0
  416. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu +5 -0
  417. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu +5 -0
  418. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu +5 -0
  419. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu +5 -0
  420. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu +5 -0
  421. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu +5 -0
  422. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu +5 -0
  423. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu +5 -0
  424. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu +5 -0
  425. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu +5 -0
  426. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu +5 -0
  427. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu +5 -0
  428. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu +5 -0
  429. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu +5 -0
  430. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu +5 -0
  431. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu +5 -0
  432. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu +5 -0
  433. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu +5 -0
  434. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu +5 -0
  435. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu +5 -0
  436. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu +5 -0
  437. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu +5 -0
  438. data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +78 -0
  439. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_s.cu +5 -0
  440. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_s.cu +5 -0
  441. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu +5 -0
  442. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu +5 -0
  443. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_s.cu +5 -0
  444. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu +5 -0
  445. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu +5 -0
  446. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu +5 -0
  447. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q2_k.cu +5 -0
  448. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q3_k.cu +5 -0
  449. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_0.cu +5 -0
  450. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_1.cu +5 -0
  451. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_k.cu +5 -0
  452. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_0.cu +5 -0
  453. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_1.cu +5 -0
  454. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_k.cu +5 -0
  455. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q6_k.cu +5 -0
  456. data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q8_0.cu +5 -0
  457. data/ext/sources/ggml/src/ggml-cuda/tsembd.cu +47 -0
  458. data/ext/sources/ggml/src/ggml-cuda/tsembd.cuh +5 -0
  459. data/ext/sources/ggml/src/ggml-cuda/unary.cu +378 -0
  460. data/ext/sources/ggml/src/ggml-cuda/unary.cuh +66 -0
  461. data/ext/sources/ggml/src/ggml-cuda/upscale.cu +51 -0
  462. data/ext/sources/ggml/src/ggml-cuda/upscale.cuh +5 -0
  463. data/ext/sources/ggml/src/ggml-cuda/vecdotq.cuh +1135 -0
  464. data/ext/{ggml → sources/ggml}/src/ggml-cuda/vendors/cuda.h +1 -0
  465. data/ext/{ggml → sources/ggml}/src/ggml-cuda/vendors/hip.h +57 -0
  466. data/ext/{ggml → sources/ggml}/src/ggml-cuda/vendors/musa.h +7 -1
  467. data/ext/sources/ggml/src/ggml-cuda/wkv.cu +199 -0
  468. data/ext/sources/ggml/src/ggml-cuda/wkv.cuh +7 -0
  469. data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +135 -0
  470. data/ext/{ggml → sources/ggml}/src/ggml-impl.h +147 -158
  471. data/ext/sources/ggml/src/ggml-kompute/CMakeLists.txt +166 -0
  472. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/common.comp +112 -0
  473. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_add.comp +58 -0
  474. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_addrow.comp +25 -0
  475. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f16.comp +52 -0
  476. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f32.comp +52 -0
  477. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f16.comp +52 -0
  478. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f32.comp +52 -0
  479. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_diagmask.comp +30 -0
  480. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_gelu.comp +22 -0
  481. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows.comp +17 -0
  482. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f16.comp +31 -0
  483. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f32.comp +31 -0
  484. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_0.comp +38 -0
  485. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_1.comp +39 -0
  486. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q6_k.comp +44 -0
  487. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul.comp +52 -0
  488. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp +69 -0
  489. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_mat_f32.comp +51 -0
  490. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_0.comp +33 -0
  491. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_1.comp +35 -0
  492. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp +140 -0
  493. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp +106 -0
  494. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q8_0.comp +73 -0
  495. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp +52 -0
  496. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp +28 -0
  497. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_norm.comp +84 -0
  498. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_relu.comp +21 -0
  499. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rmsnorm.comp +53 -0
  500. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f16.comp +52 -0
  501. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f32.comp +52 -0
  502. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f16.comp +52 -0
  503. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f32.comp +52 -0
  504. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale.comp +19 -0
  505. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale_8.comp +23 -0
  506. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_silu.comp +22 -0
  507. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp +72 -0
  508. data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp +71 -0
  509. data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +121 -0
  510. data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +649 -0
  511. data/ext/{ggml → sources/ggml}/src/ggml-metal/ggml-metal.m +2504 -1108
  512. data/ext/{ggml → sources/ggml}/src/ggml-metal/ggml-metal.metal +2102 -1463
  513. data/ext/sources/ggml/src/ggml-musa/CMakeLists.txt +113 -0
  514. data/ext/sources/ggml/src/ggml-musa/mudnn.cu +112 -0
  515. data/ext/sources/ggml/src/ggml-musa/mudnn.cuh +12 -0
  516. data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +110 -0
  517. data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +6494 -0
  518. data/ext/sources/ggml/src/ggml-opencl/kernels/add.cl +83 -0
  519. data/ext/sources/ggml/src/ggml-opencl/kernels/argsort.cl +86 -0
  520. data/ext/sources/ggml/src/ggml-opencl/kernels/clamp.cl +20 -0
  521. data/ext/sources/ggml/src/ggml-opencl/kernels/concat.cl +109 -0
  522. data/ext/sources/ggml/src/ggml-opencl/kernels/cpy.cl +184 -0
  523. data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +118 -0
  524. data/ext/sources/ggml/src/ggml-opencl/kernels/diag_mask_inf.cl +58 -0
  525. data/ext/sources/ggml/src/ggml-opencl/kernels/div.cl +72 -0
  526. data/ext/sources/ggml/src/ggml-opencl/kernels/embed_kernel.py +26 -0
  527. data/ext/sources/ggml/src/ggml-opencl/kernels/gelu.cl +62 -0
  528. data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle.cl +268 -0
  529. data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_general.cl +274 -0
  530. data/ext/sources/ggml/src/ggml-opencl/kernels/get_rows.cl +163 -0
  531. data/ext/sources/ggml/src/ggml-opencl/kernels/glu.cl +201 -0
  532. data/ext/sources/ggml/src/ggml-opencl/kernels/group_norm.cl +72 -0
  533. data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f16.cl +57 -0
  534. data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f32.cl +57 -0
  535. data/ext/sources/ggml/src/ggml-opencl/kernels/mul.cl +79 -0
  536. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mat_Ab_Bi_8x4.cl +139 -0
  537. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f16.cl +118 -0
  538. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32.cl +118 -0
  539. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_1row.cl +94 -0
  540. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_l4.cl +84 -0
  541. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f32_f32.cl +118 -0
  542. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_q4_0_f32_8x_flat.cl +283 -0
  543. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32.cl +192 -0
  544. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_16x_flat.cl +307 -0
  545. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_8x_flat.cl +265 -0
  546. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_8x_flat.cl +272 -0
  547. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_v.cl +254 -0
  548. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q6_k.cl +190 -0
  549. data/ext/sources/ggml/src/ggml-opencl/kernels/norm.cl +81 -0
  550. data/ext/sources/ggml/src/ggml-opencl/kernels/pad.cl +30 -0
  551. data/ext/sources/ggml/src/ggml-opencl/kernels/relu.cl +16 -0
  552. data/ext/sources/ggml/src/ggml-opencl/kernels/repeat.cl +39 -0
  553. data/ext/sources/ggml/src/ggml-opencl/kernels/rms_norm.cl +96 -0
  554. data/ext/sources/ggml/src/ggml-opencl/kernels/rope.cl +721 -0
  555. data/ext/sources/ggml/src/ggml-opencl/kernels/scale.cl +16 -0
  556. data/ext/sources/ggml/src/ggml-opencl/kernels/sigmoid.cl +29 -0
  557. data/ext/sources/ggml/src/ggml-opencl/kernels/silu.cl +30 -0
  558. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f16.cl +87 -0
  559. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f32.cl +87 -0
  560. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f16.cl +86 -0
  561. data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f32.cl +86 -0
  562. data/ext/sources/ggml/src/ggml-opencl/kernels/sub.cl +72 -0
  563. data/ext/sources/ggml/src/ggml-opencl/kernels/sum_rows.cl +39 -0
  564. data/ext/sources/ggml/src/ggml-opencl/kernels/tanh.cl +63 -0
  565. data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +84 -0
  566. data/ext/sources/ggml/src/ggml-opencl/kernels/tsembd.cl +48 -0
  567. data/ext/sources/ggml/src/ggml-opencl/kernels/upscale.cl +121 -0
  568. data/ext/{ggml → sources/ggml}/src/ggml-opt.cpp +373 -190
  569. data/ext/{ggml → sources/ggml}/src/ggml-quants.c +120 -128
  570. data/ext/sources/ggml/src/ggml-rpc/CMakeLists.txt +9 -0
  571. data/ext/{ggml → sources/ggml}/src/ggml-rpc/ggml-rpc.cpp +494 -84
  572. data/ext/sources/ggml/src/ggml-sycl/CMakeLists.txt +189 -0
  573. data/ext/sources/ggml/src/ggml-sycl/backend.hpp +37 -0
  574. data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +344 -0
  575. data/ext/sources/ggml/src/ggml-sycl/binbcast.hpp +39 -0
  576. data/ext/{ggml → sources/ggml}/src/ggml-sycl/common.cpp +20 -32
  577. data/ext/sources/ggml/src/ggml-sycl/common.hpp +561 -0
  578. data/ext/{ggml → sources/ggml}/src/ggml-sycl/concat.cpp +56 -70
  579. data/ext/sources/ggml/src/ggml-sycl/concat.hpp +20 -0
  580. data/ext/{ggml → sources/ggml}/src/ggml-sycl/conv.cpp +8 -12
  581. data/ext/sources/ggml/src/ggml-sycl/conv.hpp +20 -0
  582. data/ext/sources/ggml/src/ggml-sycl/convert.cpp +575 -0
  583. data/ext/sources/ggml/src/ggml-sycl/convert.hpp +34 -0
  584. data/ext/sources/ggml/src/ggml-sycl/cpy.cpp +839 -0
  585. data/ext/sources/ggml/src/ggml-sycl/cpy.hpp +11 -0
  586. data/ext/sources/ggml/src/ggml-sycl/dequantize.hpp +823 -0
  587. data/ext/{ggml → sources/ggml}/src/ggml-sycl/dmmv.cpp +188 -67
  588. data/ext/sources/ggml/src/ggml-sycl/dmmv.hpp +27 -0
  589. data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +2987 -0
  590. data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +1120 -0
  591. data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +84 -0
  592. data/ext/sources/ggml/src/ggml-sycl/gemm.hpp +102 -0
  593. data/ext/sources/ggml/src/ggml-sycl/getrows.cpp +212 -0
  594. data/ext/sources/ggml/src/ggml-sycl/getrows.hpp +20 -0
  595. data/ext/{ggml → sources/ggml}/src/ggml-sycl/ggml-sycl.cpp +1197 -1295
  596. data/ext/sources/ggml/src/ggml-sycl/gla.cpp +106 -0
  597. data/ext/sources/ggml/src/ggml-sycl/gla.hpp +8 -0
  598. data/ext/sources/ggml/src/ggml-sycl/im2col.cpp +136 -0
  599. data/ext/sources/ggml/src/ggml-sycl/im2col.hpp +21 -0
  600. data/ext/{ggml → sources/ggml}/src/ggml-sycl/mmq.cpp +60 -81
  601. data/ext/sources/ggml/src/ggml-sycl/mmq.hpp +33 -0
  602. data/ext/sources/ggml/src/ggml-sycl/mmvq.cpp +1065 -0
  603. data/ext/sources/ggml/src/ggml-sycl/mmvq.hpp +27 -0
  604. data/ext/sources/ggml/src/ggml-sycl/norm.cpp +482 -0
  605. data/ext/sources/ggml/src/ggml-sycl/norm.hpp +26 -0
  606. data/ext/{ggml → sources/ggml}/src/ggml-sycl/outprod.cpp +8 -17
  607. data/ext/sources/ggml/src/ggml-sycl/outprod.hpp +10 -0
  608. data/ext/sources/ggml/src/ggml-sycl/presets.hpp +74 -0
  609. data/ext/sources/ggml/src/ggml-sycl/quants.hpp +111 -0
  610. data/ext/sources/ggml/src/ggml-sycl/rope.cpp +472 -0
  611. data/ext/sources/ggml/src/ggml-sycl/rope.hpp +20 -0
  612. data/ext/{ggml → sources/ggml}/src/ggml-sycl/softmax.cpp +38 -28
  613. data/ext/sources/ggml/src/ggml-sycl/softmax.hpp +20 -0
  614. data/ext/sources/ggml/src/ggml-sycl/sycl_hw.cpp +15 -0
  615. data/ext/sources/ggml/src/ggml-sycl/sycl_hw.hpp +26 -0
  616. data/ext/{ggml → sources/ggml}/src/ggml-sycl/tsembd.cpp +6 -11
  617. data/ext/sources/ggml/src/ggml-sycl/tsembd.hpp +20 -0
  618. data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +1307 -0
  619. data/ext/sources/ggml/src/ggml-sycl/wkv.cpp +289 -0
  620. data/ext/sources/ggml/src/ggml-sycl/wkv.hpp +10 -0
  621. data/ext/sources/ggml/src/ggml-vulkan/CMakeLists.txt +200 -0
  622. data/ext/sources/ggml/src/ggml-vulkan/cmake/host-toolchain.cmake.in +15 -0
  623. data/ext/{ggml → sources/ggml}/src/ggml-vulkan/ggml-vulkan.cpp +3822 -1335
  624. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +31 -0
  625. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +29 -0
  626. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add.comp +29 -0
  627. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argmax.comp +51 -0
  628. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp +69 -0
  629. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp +17 -0
  630. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp +41 -0
  631. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp +49 -0
  632. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp +105 -0
  633. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv_transpose_1d.comp +98 -0
  634. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp +23 -0
  635. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +51 -0
  636. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +242 -0
  637. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp +17 -0
  638. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/count_equal.comp +31 -0
  639. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_f32.comp +20 -0
  640. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp +462 -0
  641. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +699 -0
  642. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_head.comp +13 -0
  643. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_m.comp +42 -0
  644. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_s.comp +35 -0
  645. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp +44 -0
  646. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xs.comp +43 -0
  647. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp +48 -0
  648. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp +39 -0
  649. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp +49 -0
  650. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp +32 -0
  651. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_xs.comp +34 -0
  652. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp +34 -0
  653. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp +42 -0
  654. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_0.comp +30 -0
  655. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_1.comp +32 -0
  656. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +68 -0
  657. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_0.comp +34 -0
  658. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_1.comp +35 -0
  659. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +70 -0
  660. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp +33 -0
  661. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q8_0.comp +31 -0
  662. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/diag_mask_inf.comp +34 -0
  663. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/div.comp +27 -0
  664. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +337 -0
  665. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.comp +162 -0
  666. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +360 -0
  667. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +267 -0
  668. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +59 -0
  669. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu.comp +13 -0
  670. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu.comp +25 -0
  671. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu_quick.comp +23 -0
  672. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp +64 -0
  673. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_head.comp +9 -0
  674. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp +76 -0
  675. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp +33 -0
  676. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +41 -0
  677. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/glu_head.comp +15 -0
  678. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/glu_main.comp +29 -0
  679. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp +66 -0
  680. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +100 -0
  681. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +41 -0
  682. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/leaky_relu.comp +22 -0
  683. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp +27 -0
  684. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp +48 -0
  685. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +169 -0
  686. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp +118 -0
  687. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_m.comp +82 -0
  688. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp +79 -0
  689. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_s.comp +90 -0
  690. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xs.comp +87 -0
  691. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xxs.comp +87 -0
  692. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_s.comp +90 -0
  693. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_xxs.comp +88 -0
  694. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +118 -0
  695. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp +154 -0
  696. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +130 -0
  697. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +132 -0
  698. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +136 -0
  699. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +167 -0
  700. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +130 -0
  701. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +868 -0
  702. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +441 -0
  703. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +442 -0
  704. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.comp +99 -0
  705. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/norm.comp +44 -0
  706. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_adamw.comp +42 -0
  707. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +28 -0
  708. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pool2d.comp +74 -0
  709. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/quantize_q8_1.comp +77 -0
  710. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/reglu.comp +9 -0
  711. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/relu.comp +21 -0
  712. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp +26 -0
  713. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat_back.comp +37 -0
  714. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +61 -0
  715. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_back.comp +55 -0
  716. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp +58 -0
  717. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +60 -0
  718. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +43 -0
  719. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +43 -0
  720. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_vision.comp +47 -0
  721. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +24 -0
  722. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sigmoid.comp +20 -0
  723. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu.comp +22 -0
  724. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu_back.comp +26 -0
  725. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp +17 -0
  726. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +173 -0
  727. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_back.comp +50 -0
  728. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/square.comp +17 -0
  729. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sub.comp +29 -0
  730. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp +37 -0
  731. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/swiglu.comp +9 -0
  732. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp +20 -0
  733. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_bfloat16_support.comp +7 -0
  734. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat2_support.comp +7 -0
  735. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp +7 -0
  736. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_integer_dot_support.comp +7 -0
  737. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp +41 -0
  738. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/types.comp +1373 -0
  739. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +36 -0
  740. data/ext/{ggml → sources/ggml}/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +203 -36
  741. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/wkv6.comp +87 -0
  742. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/wkv7.comp +91 -0
  743. data/ext/{ggml → sources/ggml}/src/ggml.c +918 -1782
  744. data/ext/sources/ggml/src/ggml.cpp +26 -0
  745. data/ext/sources/ggml/src/gguf.cpp +1351 -0
  746. data/ext/{include → sources/include}/whisper.h +70 -2
  747. data/ext/sources/src/CMakeLists.txt +145 -0
  748. data/ext/sources/src/coreml/whisper-compat.h +10 -0
  749. data/ext/sources/src/coreml/whisper-compat.m +35 -0
  750. data/ext/{src → sources/src}/coreml/whisper-decoder-impl.h +27 -15
  751. data/ext/{src → sources/src}/coreml/whisper-decoder-impl.m +36 -10
  752. data/ext/{src → sources/src}/coreml/whisper-encoder-impl.h +21 -9
  753. data/ext/{src → sources/src}/coreml/whisper-encoder-impl.m +29 -3
  754. data/ext/sources/src/coreml/whisper-encoder.mm +73 -0
  755. data/ext/sources/src/whisper-arch.h +197 -0
  756. data/ext/{src → sources/src}/whisper.cpp +1966 -386
  757. data/ext/sources/tests/CMakeLists.txt +105 -0
  758. data/ext/sources/tests/earnings21/eval.mk +58 -0
  759. data/ext/sources/tests/earnings21/eval.py +68 -0
  760. data/ext/sources/tests/earnings21/normalizers/__init__.py +2 -0
  761. data/ext/sources/tests/earnings21/normalizers/basic.py +80 -0
  762. data/ext/sources/tests/earnings21/normalizers/english.json +1741 -0
  763. data/ext/sources/tests/earnings21/normalizers/english.py +550 -0
  764. data/ext/sources/tests/earnings21/requirements.txt +6 -0
  765. data/ext/sources/tests/en-0-ref.txt +1 -0
  766. data/ext/sources/tests/en-1-ref.txt +1 -0
  767. data/ext/sources/tests/en-2-ref.txt +1 -0
  768. data/ext/sources/tests/es-0-ref.txt +1 -0
  769. data/ext/sources/tests/librispeech/eval.mk +39 -0
  770. data/ext/sources/tests/librispeech/eval.py +47 -0
  771. data/ext/sources/tests/librispeech/normalizers/__init__.py +2 -0
  772. data/ext/sources/tests/librispeech/normalizers/basic.py +80 -0
  773. data/ext/sources/tests/librispeech/normalizers/english.json +1741 -0
  774. data/ext/sources/tests/librispeech/normalizers/english.py +550 -0
  775. data/ext/sources/tests/librispeech/requirements.txt +6 -0
  776. data/ext/sources/tests/run-tests.sh +130 -0
  777. data/ext/sources/tests/test-c.c +3 -0
  778. data/ext/sources/tests/test-vad-full.cpp +54 -0
  779. data/ext/sources/tests/test-vad.cpp +83 -0
  780. data/ext/sources/tests/test-whisper.js +58 -0
  781. data/extsources.rb +39 -5
  782. data/lib/whisper/context.rb +15 -0
  783. data/lib/whisper/model/uri.rb +202 -126
  784. data/lib/whisper/segment.rb +58 -0
  785. data/sig/whisper.rbs +510 -0
  786. data/test/helper.rb +24 -0
  787. data/{tests → test}/test_callback.rb +45 -3
  788. data/{tests → test}/test_error.rb +2 -2
  789. data/{tests → test}/test_model.rb +47 -0
  790. data/test/test_package.rb +51 -0
  791. data/test/test_params.rb +297 -0
  792. data/test/test_segment.rb +146 -0
  793. data/test/test_vad.rb +19 -0
  794. data/test/test_vad_params.rb +103 -0
  795. data/{tests → test}/test_whisper.rb +106 -36
  796. data/whispercpp.gemspec +5 -5
  797. metadata +837 -134
  798. data/ext/cpu.mk +0 -9
  799. data/ext/examples/dr_wav.h +0 -8815
  800. data/ext/ggml/src/ggml-cann/aclnn_ops.h +0 -592
  801. data/ext/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +0 -4262
  802. data/ext/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +0 -8
  803. data/ext/ggml/src/ggml-cpu/ggml-cpu-quants.c +0 -10835
  804. data/ext/ggml/src/ggml-cpu/ggml-cpu.c +0 -14123
  805. data/ext/ggml/src/ggml-cpu/llamafile/sgemm.cpp +0 -1884
  806. data/ext/ggml/src/ggml-cpu/llamafile/sgemm.h +0 -14
  807. data/ext/ggml/src/ggml-metal/ggml-metal-impl.h +0 -288
  808. data/ext/ggml/src/ggml-sycl/convert.cpp +0 -547
  809. data/ext/ggml/src/ggml-sycl/element_wise.cpp +0 -1030
  810. data/ext/ggml/src/ggml-sycl/im2col.cpp +0 -126
  811. data/ext/ggml/src/ggml-sycl/mmvq.cpp +0 -1015
  812. data/ext/ggml/src/ggml-sycl/norm.cpp +0 -378
  813. data/ext/ggml/src/ggml-sycl/rope.cpp +0 -276
  814. data/ext/ggml/src/ggml-sycl/wkv6.cpp +0 -141
  815. data/ext/metal-embed.mk +0 -17
  816. data/ext/metal.mk +0 -6
  817. data/ext/ruby_whisper.cpp +0 -1909
  818. data/ext/scripts/get-flags.mk +0 -38
  819. data/lib/whisper.rb +0 -2
  820. data/tests/helper.rb +0 -7
  821. data/tests/test_package.rb +0 -31
  822. data/tests/test_params.rb +0 -160
  823. data/tests/test_segment.rb +0 -83
  824. /data/ext/{ggml → sources/ggml}/include/ggml-blas.h +0 -0
  825. /data/ext/{ggml → sources/ggml}/include/ggml-cann.h +0 -0
  826. /data/ext/{ggml → sources/ggml}/include/ggml-cuda.h +0 -0
  827. /data/ext/{ggml → sources/ggml}/include/ggml-kompute.h +0 -0
  828. /data/ext/{ggml → sources/ggml}/include/ggml-opencl.h +0 -0
  829. /data/ext/{ggml → sources/ggml}/include/ggml-sycl.h +0 -0
  830. /data/ext/{ggml → sources/ggml}/src/ggml-amx/common.h +0 -0
  831. /data/ext/{ggml → sources/ggml}/src/ggml-amx/ggml-amx.cpp +0 -0
  832. /data/ext/{ggml → sources/ggml}/src/ggml-amx/mmq.cpp +0 -0
  833. /data/ext/{ggml → sources/ggml}/src/ggml-amx/mmq.h +0 -0
  834. /data/ext/{ggml → sources/ggml}/src/ggml-blas/ggml-blas.cpp +0 -0
  835. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/ascendc_kernels.h +0 -0
  836. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_f16.cpp +0 -0
  837. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_f32.cpp +0 -0
  838. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_q4_0.cpp +0 -0
  839. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_q8_0.cpp +0 -0
  840. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +0 -0
  841. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +0 -0
  842. /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +0 -0
  843. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/amx.h +0 -0
  844. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/common.h +0 -0
  845. /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/mmq.h +0 -0
  846. /data/ext/{ggml/src/ggml-cpu/ggml-cpu-hbm.h → sources/ggml/src/ggml-cpu/hbm.h} +0 -0
  847. /data/ext/{ggml/src/ggml-cpu/ggml-cpu-traits.h → sources/ggml/src/ggml-cpu/traits.h} +0 -0
  848. /data/ext/{ggml → sources/ggml}/src/ggml-kompute/ggml-kompute.cpp +0 -0
  849. /data/ext/{ggml → sources/ggml}/src/ggml-quants.h +0 -0
  850. /data/ext/{ggml → sources/ggml}/src/ggml-threading.cpp +0 -0
  851. /data/ext/{ggml → sources/ggml}/src/ggml-threading.h +0 -0
  852. /data/ext/{src → sources/src}/coreml/whisper-encoder.h +0 -0
  853. /data/ext/{src → sources/src}/openvino/whisper-openvino-encoder.cpp +0 -0
  854. /data/ext/{src → sources/src}/openvino/whisper-openvino-encoder.h +0 -0
  855. /data/{tests → test}/jfk_reader/.gitignore +0 -0
  856. /data/{tests → test}/jfk_reader/extconf.rb +0 -0
  857. /data/{tests → test}/jfk_reader/jfk_reader.c +0 -0
@@ -0,0 +1,2732 @@
+ #define GGML_COMMON_IMPL_C
+ #include "ggml-common.h"
+ #include "ggml-quants.h"
+ #include "ggml-impl.h"
+ #include "ggml-cpu.h"
+ #include "simd-mappings.h"
+
+ #include "../../quants.h"
+ #include "../../ggml-cpu-impl.h"
+
+ #include <math.h>
+ #include <string.h>
+ #include <assert.h>
+ #include <float.h>
+ #include <stdlib.h> // for qsort
+ #include <stdio.h>  // for GGML_ASSERT
+
+ #define GROUP_MAX_EPS 1e-15f
+ #define GROUP_MAX_EPS_IQ3_XXS 1e-8f
+ #define GROUP_MAX_EPS_IQ2_S 1e-8f
+ #define GROUP_MAX_EPS_IQ1_M 1e-7f
+ #define GROUP_MAX_EPS_IQ1_S 1e-12f
+
+ #define UNUSED GGML_UNUSED
+
+ #if defined(__POWER9_VECTOR__)
+ #define B1(c,s,n)  0x ## n ## c , 0x ## n ## s
+ #define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
+ #define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
+ #define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
+ #define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
+ #define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
+ #define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
+ #define B8(c,s  ) B7(c,s,     c), B7(c,s,     s)
+
+ // precomputed tables for expanding 8bits to 8 bytes:
+ static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
+ static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
+ #endif
+
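The B1..B8 macro ladder above enumerates all 256 byte values at compile time: byte j of table_b2b_0[b] holds bit j of b moved into the 0x10 position, and table_b2b_1[b] holds the complemented bit. A minimal runtime re-derivation, for reference only (the helper name is ours, not part of the diff):

    #include <stdint.h>

    // byte j of table_b2b_0[b] == ((b >> j) & 1) << 4
    // byte j of table_b2b_1[b] == (((b >> j) & 1) ^ 1) << 4
    static uint64_t b2b_expand(uint8_t b, int complement) {
        uint64_t r = 0;
        for (int j = 0; j < 8; ++j) {
            uint64_t bit = (b >> j) & 1;
            if (complement) bit ^= 1;
            r |= (bit << 4) << (8*j); // place 0x00 or 0x10 in byte j
        }
        return r;
    }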
+ void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
+     assert(QK8_0 == 32);
+     assert(k % QK8_0 == 0);
+     const int nb = k / QK8_0;
+
+     block_q8_0 * GGML_RESTRICT y = vy;
+
+ #if defined(__POWER9_VECTOR__)
+     for (int i = 0; i < nb; i++) {
+         vector float srcv [8];
+         vector float asrcv[8];
+         vector float amaxv[8];
+         vector signed int vi[8];
+
+         for (int j = 0; j < 8; j++) srcv[j]  = vec_xl(0, x + i*32 + 4*j);
+         for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]);
+
+         for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]);
+         for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]);
+         for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]);
+
+         const float amax = MAX(MAX(vec_extract(amaxv[0], 0),
+                                    vec_extract(amaxv[0], 1)),
+                                MAX(vec_extract(amaxv[0], 2),
+                                    vec_extract(amaxv[0], 3)));
+
+         const float d  = amax / ((1 << 7) - 1);
+         const float id = d ? 1.0f/d : 0.0f;
+         const vector float vid = vec_splats(id);
+
+         y[i].d = GGML_CPU_FP32_TO_FP16(d);
+
+         for (int j = 0; j < 8; j++) {
+             const vector float v = vec_round(vec_mul(srcv[j], vid));
+             vi[j] = vec_cts(v, 0);
+         }
+         vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])),  0, &y[i].qs[0]);
+         vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]);
+     }
+ #else
+     GGML_UNUSED(nb);
+     // scalar
+     quantize_row_q8_0_ref(x, y, k);
+ #endif
+ }
+
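Both branches implement the same per-block math: for each run of 32 floats the scale is d = max|x| / 127, and each element is stored as round(x/d) in an int8. A scalar sketch of one block under only that layout assumption (the real quantize_row_q8_0_ref also handles the fp16 conversion; the helper name is ours):

    #include <math.h>
    #include <stdint.h>

    // One q8_0 block: 32 floats -> 32 int8 plus one scale.
    static void q8_0_block_sketch(const float x[32], float * d_out, int8_t qs[32]) {
        float amax = 0.0f;
        for (int j = 0; j < 32; ++j) {
            const float ax = fabsf(x[j]);
            if (ax > amax) amax = ax;
        }
        const float d  = amax / 127.0f;     // 127 == (1 << 7) - 1
        const float id = d ? 1.0f/d : 0.0f;
        for (int j = 0; j < 32; ++j) {
            qs[j] = (int8_t)roundf(x[j]*id);
        }
        *d_out = d; // the kernel above stores this as fp16 in y[i].d
    }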
+ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
+     assert(k % QK8_1 == 0);
+     const int nb = k / QK8_1;
+
+     block_q8_1 * GGML_RESTRICT y = vy;
+
+ #if defined(__POWER9_VECTOR__)
+     for (int i = 0; i < nb; i++) {
+         vector float srcv [8];
+         vector float asrcv[8];
+         vector float amaxv[8];
+         vector signed int vi[8];
+
+         for (int j = 0; j < 8; j++) srcv[j]  = vec_xl(0, x + i*32 + 4*j);
+         for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]);
+
+         for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]);
+         for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]);
+         for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]);
+
+         const float amax = MAX(MAX(vec_extract(amaxv[0], 0),
+                                    vec_extract(amaxv[0], 1)),
+                                MAX(vec_extract(amaxv[0], 2),
+                                    vec_extract(amaxv[0], 3)));
+
+         const float d  = amax / ((1 << 7) - 1);
+         const float id = d ? 1.0f/d : 0.0f;
+         const vector float vid = vec_splats(id);
+
+         y[i].d = GGML_CPU_FP32_TO_FP16(d);
+
+         vector int accv = vec_splats(0);
+
+         for (int j = 0; j < 8; j++) {
+             const vector float v = vec_round(vec_mul(srcv[j], vid));
+             vi[j] = vec_cts(v, 0);
+
+             accv = vec_add(accv, vi[j]);
+         }
+         vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])),  0, &y[i].qs[0]);
+         vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]);
+
+         accv = vec_add(accv, vec_sld(accv, accv, 4));
+         accv = vec_add(accv, vec_sld(accv, accv, 8));
+         y[i].s = GGML_CPU_FP32_TO_FP16(d * vec_extract(accv, 0));
+     }
+
+ #else
+     GGML_UNUSED(nb);
+     // scalar
+     quantize_row_q8_1_ref(x, y, k);
+ #endif
+ }
+
+
+ //===================================== Dot products =================================
+
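quantize_row_q8_1 differs from q8_0 only in the accv accumulator: besides d it stores s = d * Σ qs[j], the scaled sum of the block, so the q4_1/q5_1 dot products below can apply their per-block minimum against y without re-reading the 32 values. A sketch of the extra field (illustrative, name is ours):

    #include <stdint.h>

    // The extra field a q8_1 block carries relative to q8_0.
    static float q8_1_s_field(const int8_t qs[32], float d) {
        int sum = 0;
        for (int j = 0; j < 32; ++j) sum += qs[j];
        return d * (float)sum; // stored as fp16 in y[i].s
    }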
+ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+     const int qk = QK8_0;
+     const int nb = n / qk;
+
+     assert(n % qk == 0);
+     assert(nrc == 1);
+     UNUSED(nrc);
+     UNUSED(bx);
+     UNUSED(by);
+     UNUSED(bs);
+
+     const block_q4_0 * GGML_RESTRICT x = vx;
+     const block_q8_0 * GGML_RESTRICT y = vy;
+
+     int ib = 0;
+     float sumf = 0;
+
+ #if defined(__POWER9_VECTOR__)
+     const vector signed char lowMask = vec_splats((signed char)0xF);
+     const vector signed int v0 = vec_splats((int32_t)0);
+     const vector unsigned char v4 = vec_splats((unsigned char)0x4);
+     const vector signed char v8 = vec_splats((signed char)0x8);
+
+     vector float vsumf0 = vec_splats(0.0f);
+
+ #pragma GCC unroll 8
+     for (; ib < nb; ++ib) {
+         __builtin_prefetch(x[ib].qs, 0, 1);
+         __builtin_prefetch(y[ib].qs, 0, 1);
+
+         vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d));
+         vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d));
+         vector float vd = vec_mul(vxd, vyd);
+
+         vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs);
+         vector signed char q8y0 = vec_xl( 0, y[ib].qs);
+         vector signed char q8y1 = vec_xl(16, y[ib].qs);
+
+         vector signed char q4x0 = vec_and(qxs, lowMask);
+         vector signed char q4x1 = vec_sr(qxs, v4);
+
+         q4x0 = vec_sub(q4x0, v8);
+         q4x1 = vec_sub(q4x1, v8);
+
+         vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0));
+         vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1));
+
+         vector signed int vsumi0 = v0;
+
+         vsumi0 = vec_sum4s(qv0, vsumi0);
+         vsumi0 = vec_sum4s(qv1, vsumi0);
+
+         vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+     }
+
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+     sumf = vec_extract(vsumf0, 0);
+
+ #endif
+     for (; ib < nb; ++ib) {
+         int sumi0 = 0;
+         int sumi1 = 0;
+
+         for (int j = 0; j < qk/2; ++j) {
+             const int v0 = (x[ib].qs[j] & 0x0F) - 8;
+             const int v1 = (x[ib].qs[j] >>   4) - 8;
+
+             sumi0 += (v0 * y[ib].qs[j]);
+             sumi1 += (v1 * y[ib].qs[j + qk/2]);
+         }
+
+         int sumi = sumi0 + sumi1;
+         sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d);
+     }
+
+     *s = sumf;
+ }
+
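In q4_0 each byte packs two 4-bit weights offset by 8: the low nibble is element j, the high nibble element j+16, which is exactly the layout the scalar tail walks. For reference, dequantizing one block under that layout (a sketch; the packaged code keeps everything in integers as above):

    #include <stdint.h>

    // One q4_0 block: 16 bytes of nibbles + scale d -> 32 floats.
    static void dequant_q4_0_sketch(const uint8_t qs[16], float d, float out[32]) {
        for (int j = 0; j < 16; ++j) {
            out[j]      = d * (float)((qs[j] & 0x0F) - 8); // low nibble  -> element j
            out[j + 16] = d * (float)((qs[j] >>   4) - 8); // high nibble -> element j+16
        }
    }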
+ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+     const int qk = QK8_1;
+     const int nb = n / qk;
+
+     assert(n % qk == 0);
+     assert(nrc == 1);
+     UNUSED(nrc);
+     UNUSED(bx);
+     UNUSED(by);
+     UNUSED(bs);
+
+     const block_q4_1 * GGML_RESTRICT x = vx;
+     const block_q8_1 * GGML_RESTRICT y = vy;
+
+     int ib = 0;
+     float sumf = 0;
+
+ #if defined(__POWER9_VECTOR__)
+     const vector signed char lowMask = vec_splats((signed char)0xF);
+     const vector signed int v0 = vec_splats((int32_t)0);
+     const vector unsigned char v4 = vec_splats((unsigned char)0x4);
+
+     vector float vsumf0 = vec_splats(0.0f);
+
+ #pragma GCC unroll 4
+     for (; ib < nb; ++ib) {
+         __builtin_prefetch(x[ib].qs, 0, 1);
+         __builtin_prefetch(y[ib].qs, 0, 1);
+
+         vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d));
+         vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d));
+         vector float vd = vec_mul(vxd, vyd);
+
+         vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].m));
+         vector float vys = {GGML_CPU_FP16_TO_FP32(y[ib].s), 0.0f, 0.0f, 0.0f};
+         vsumf0 = vec_madd(vxmin, vys, vsumf0);
+
+         vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs);
+         vector signed char q8y0 = vec_xl( 0, y[ib].qs);
+         vector signed char q8y1 = vec_xl(16, y[ib].qs);
+
+         vector unsigned char q4x0 = (vector unsigned char)vec_and(qxs, lowMask);
+         vector unsigned char q4x1 = (vector unsigned char)vec_sr(qxs, v4);
+
+         vector signed int vsumi0 = v0;
+
+         vsumi0 = vec_msum(q8y0, q4x0, vsumi0);
+         vsumi0 = vec_msum(q8y1, q4x1, vsumi0);
+
+         vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+     }
+
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+     sumf = vec_extract(vsumf0, 0);
+
+ #endif
+     for (; ib < nb; ++ib) {
+         int sumi0 = 0;
+         int sumi1 = 0;
+
+         for (int j = 0; j < qk/2; ++j) {
+             const int v0 = (x[ib].qs[j] & 0x0F);
+             const int v1 = (x[ib].qs[j] >>   4);
+
+             sumi0 += (v0 * y[ib].qs[j]);
+             sumi1 += (v1 * y[ib].qs[j + qk/2]);
+         }
+
+         int sumi = sumi0 + sumi1;
+         sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
+     }
+
+     *s = sumf;
+ }
+
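q4_1 keeps the nibbles unsigned and adds a per-block minimum m instead of the fixed -8, so each block contributes d_x*d_y*Σ q*y plus m_x*s_y, where s_y is the q8_1 sum field prepared earlier; that is what the vec_madd(vxmin, vys, vsumf0) line folds in up front. One block in plain C (a sketch, helper name ours):

    #include <stdint.h>

    // One q4_1 x q8_1 block; sy is the precomputed dy * sum(yq) field.
    static float dot_q4_1_block_sketch(const uint8_t xqs[16], float dx, float mx,
                                       const int8_t yq[32], float dy, float sy) {
        int sumi = 0;
        for (int j = 0; j < 16; ++j) {
            sumi += (xqs[j] & 0x0F) * yq[j];
            sumi += (xqs[j] >>   4) * yq[j + 16];
        }
        return dx*dy*(float)sumi + mx*sy;
    }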
+ void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+     const int qk = QK8_0;
+     const int nb = n / qk;
+
+     int ib = 0;
+     float sumf = 0;
+
+     assert(n % qk == 0);
+     assert(qk == QK5_0);
+     assert(nrc == 1);
+     UNUSED(nrc);
+     UNUSED(bx);
+     UNUSED(by);
+     UNUSED(bs);
+
+     const block_q5_0 * GGML_RESTRICT x = vx;
+     const block_q8_0 * GGML_RESTRICT y = vy;
+
+ #if defined(__POWER9_VECTOR__)
+     const vector signed char lowMask = vec_splats((signed char)0xF);
+     const vector unsigned char v4 = vec_splats((unsigned char)4);
+
+     vector float vsumf0 = vec_splats(0.0f);
+
+ #pragma GCC unroll 4
+     for (; ib < nb; ++ib) {
+         __builtin_prefetch(x[ib].qs, 0, 1);
+         __builtin_prefetch(y[ib].qs, 0, 1);
+
+         vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d));
+         vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d));
+         vector float vd = vec_mul(vxd, vyd);
+
+         vector signed long long aux64x2_0 = {(uint64_t)(table_b2b_1[x[ib].qh[0]]), (uint64_t)(table_b2b_1[x[ib].qh[1]])};
+         vector signed long long aux64x2_1 = {(uint64_t)(table_b2b_1[x[ib].qh[2]]), (uint64_t)(table_b2b_1[x[ib].qh[3]])};
+
+         vector signed char qh0 = (vector signed char)aux64x2_0;
+         vector signed char qh1 = (vector signed char)aux64x2_1;
+
+         vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs);
+
+         vector signed char q5x0 = vec_sub(vec_and(qxs, lowMask), qh0);
+         vector signed char q5x1 = vec_sub(vec_sr(qxs, v4), qh1);
+
+         vector signed char q8y0 = vec_xl( 0, y[ib].qs);
+         vector signed char q8y1 = vec_xl(16, y[ib].qs);
+
+         vector signed short qv0 = vec_add(vec_mule(q5x0, q8y0), vec_mulo(q5x0, q8y0));
+         vector signed short qv1 = vec_add(vec_mule(q5x1, q8y1), vec_mulo(q5x1, q8y1));
+
+         qv0 = vec_add(qv0, qv1);
+
+         vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0));
+
+         vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+     }
+
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+     sumf = vec_extract(vsumf0, 0);
+
+ #endif
+     for (; ib < nb; ++ib) {
+         uint32_t qh;
+         memcpy(&qh, x[ib].qh, sizeof(qh));
+
+         int sumi0 = 0;
+         int sumi1 = 0;
+
+         for (int j = 0; j < qk/2; ++j) {
+             const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
+             const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
+
+             const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16);
+             const int32_t x1 = (int8_t)(((x[ib].qs[j] >>   4) | xh_1) - 16);
+
+             sumi0 += (x0 * y[ib].qs[j]);
+             sumi1 += (x1 * y[ib].qs[j + qk/2]);
+         }
+
+         int sumi = sumi0 + sumi1;
+         sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi;
+     }
+
+     *s = sumf;
+ }
+
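q5_0 stores the fifth bit of all 32 weights in the 4-byte qh field: bit j completes element j (from a low nibble) and bit j+16 completes element j+16 (from a high nibble). The vector path fetches those bits through table_b2b_1, which holds 16*(1 - bit), so the single vec_sub applies both the high bit and the -16 recentering in one step. A scalar restatement of one pair (the same bit plumbing as the tail loop; helper name ours):

    #include <stdint.h>

    // Rebuild elements j and j+16 of a q5_0 block (0 <= j < 16).
    static void q5_0_pair_sketch(const uint8_t qs[16], uint32_t qh, float d,
                                 int j, float * lo, float * hi) {
        const uint8_t xh_0 = ((qh >> (j +  0)) & 1) << 4; // 5th bit of element j
        const uint8_t xh_1 = ((qh >> (j + 16)) & 1) << 4; // 5th bit of element j+16
        *lo = d * (float)(((qs[j] & 0x0F) | xh_0) - 16);
        *hi = d * (float)(((qs[j] >>   4) | xh_1) - 16);
    }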
+ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+     const int qk = QK8_1;
+     const int nb = n / qk;
+
+     int ib = 0;
+     float sumf = 0;
+
+     assert(n % qk == 0);
+     assert(qk == QK5_1);
+     assert(nrc == 1);
+     UNUSED(nrc);
+     UNUSED(bx);
+     UNUSED(by);
+     UNUSED(bs);
+
+     const block_q5_1 * GGML_RESTRICT x = vx;
+     const block_q8_1 * GGML_RESTRICT y = vy;
+
+ #if defined(__POWER9_VECTOR__)
+     const vector signed char lowMask = vec_splats((signed char)0xF);
+     const vector signed int v0 = vec_splats((int32_t)0);
+     const vector unsigned char v4 = vec_splats((unsigned char)0x4);
+
+     vector float vsumf0 = vec_splats(0.0f);
+
+ #pragma GCC unroll 4
+     for (; ib < nb; ++ib) {
+         __builtin_prefetch(x[ib].qs, 0, 1);
+         __builtin_prefetch(y[ib].qs, 0, 1);
+
+         vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d));
+         vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d));
+         vector float vd = vec_mul(vxd, vyd);
+
+         vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].m));
+         vector float vys = {GGML_CPU_FP16_TO_FP32(y[ib].s), 0.f, 0.f, 0.f};
+         vsumf0 = vec_madd(vxmin, vys, vsumf0);
+
+         vector unsigned long long aux64x2_0 = {(uint64_t)(table_b2b_0[x[ib].qh[0]]), (uint64_t)(table_b2b_0[x[ib].qh[1]])};
+         vector unsigned long long aux64x2_1 = {(uint64_t)(table_b2b_0[x[ib].qh[2]]), (uint64_t)(table_b2b_0[x[ib].qh[3]])};
+
+         vector signed char qh0 = (vector signed char)aux64x2_0;
+         vector signed char qh1 = (vector signed char)aux64x2_1;
+
+         vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs);
+
+         vector unsigned char q5x0 = (vector unsigned char)vec_or(vec_and(qxs, lowMask), qh0);
+         vector unsigned char q5x1 = (vector unsigned char)vec_or(vec_sr(qxs, v4), qh1);
+
+         vector signed char q8y0 = vec_xl( 0, y[ib].qs);
+         vector signed char q8y1 = vec_xl(16, y[ib].qs);
+
+         vector signed int vsumi0 = v0;
+
+         vsumi0 = vec_msum(q8y0, q5x0, vsumi0);
+         vsumi0 = vec_msum(q8y1, q5x1, vsumi0);
+
+         vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+     }
+
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+     sumf = vec_extract(vsumf0, 0);
+
+ #endif
+     for (; ib < nb; ++ib) {
+         uint32_t qh;
+         memcpy(&qh, x[ib].qh, sizeof(qh));
+
+         int sumi0 = 0;
+         int sumi1 = 0;
+
+         for (int j = 0; j < qk/2; ++j) {
+             const uint8_t xh_0 = ((qh >> (j +  0)) << 4) & 0x10;
+             const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;
+
+             const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0;
+             const int32_t x1 = (x[ib].qs[j] >>  4) | xh_1;
+
+             sumi0 += (x0 * y[ib].qs[j]);
+             sumi1 += (x1 * y[ib].qs[j + qk/2]);
+         }
+
+         int sumi = sumi0 + sumi1;
+         sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
+     }
+
+     *s = sumf;
+ }
+
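q5_1 reconstructs the same fifth bit but ORs it in without recentering, because the separate minimum m already anchors the range; that is why this kernel indexes table_b2b_0 (the uncomplemented table) where q5_0 used table_b2b_1. In scalar terms (a sketch, name ours):

    #include <stdint.h>

    // q5_1 weight: unsigned 0..31; the offset is handled by the block minimum.
    static uint8_t q5_1_weight_sketch(uint8_t nibble, uint32_t qh, int bit_index) {
        return (uint8_t)(nibble | (((qh >> bit_index) & 1) << 4));
    }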
+ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+     const int qk = QK8_0;
+     const int nb = n / qk;
+
+     assert(n % qk == 0);
+     assert(nrc == 1);
+     UNUSED(nrc);
+     UNUSED(bx);
+     UNUSED(by);
+     UNUSED(bs);
+
+     const block_q8_0 * GGML_RESTRICT x = vx;
+     const block_q8_0 * GGML_RESTRICT y = vy;
+
+     int ib = 0;
+     float sumf = 0;
+
+ #if defined(__POWER9_VECTOR__)
+     const vector signed int v0 = vec_splats((int32_t)0);
+     vector float vsumf0 = vec_splats(0.0f);
+
+ #pragma GCC unroll 8
+     for (; ib < nb; ++ib) {
+         __builtin_prefetch(x[ib].qs, 0, 1);
+         __builtin_prefetch(y[ib].qs, 0, 1);
+
+         vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d));
+         vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d));
+         vector float vd = vec_mul(vxd, vyd);
+
+         vector signed char q8x0 = vec_xl( 0, x[ib].qs);
+         vector signed char q8x1 = vec_xl(16, x[ib].qs);
+         vector signed char q8y0 = vec_xl( 0, y[ib].qs);
+         vector signed char q8y1 = vec_xl(16, y[ib].qs);
+
+         vector signed short qv0 = vec_mule(q8x0, q8y0);
+         vector signed short qv1 = vec_mulo(q8x0, q8y0);
+         vector signed short qv2 = vec_mule(q8x1, q8y1);
+         vector signed short qv3 = vec_mulo(q8x1, q8y1);
+
+         vector signed int vsumi0 = v0;
+         vector signed int vsumi1 = v0;
+
+         vsumi0 = vec_sum4s(qv0, vsumi0);
+         vsumi1 = vec_sum4s(qv1, vsumi1);
+         vsumi0 = vec_sum4s(qv2, vsumi0);
+         vsumi1 = vec_sum4s(qv3, vsumi1);
+
+         vsumi0 = vec_add(vsumi0, vsumi1);
+
+         vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+     }
+
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+     sumf = vec_extract(vsumf0, 0);
+
+ #endif
+     for (; ib < nb; ++ib) {
+         int sumi = 0;
+
+         for (int j = 0; j < qk; j++) {
+             sumi += x[ib].qs[j]*y[ib].qs[j];
+         }
+
+         sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d));
+     }
+
+     *s = sumf;
+ }
+
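q8_0 against q8_0 is the simplest case: per block both branches compute dx*dy*Σ x_j*y_j and nothing else; the vector path merely splits the 32 int8 products across vec_mule/vec_mulo before the vec_sum4s reductions. The equivalent per-block scalar (a sketch):

    #include <stdint.h>

    static float dot_q8_0_block_sketch(const int8_t xq[32], float dx,
                                       const int8_t yq[32], float dy) {
        int sumi = 0;
        for (int j = 0; j < 32; ++j) sumi += xq[j]*yq[j];
        return dx*dy*(float)sumi;
    }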
+ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+     assert(nrc == 1);
+     UNUSED(nrc);
+     UNUSED(bx);
+     UNUSED(by);
+     UNUSED(bs);
+
+     const block_q2_K * GGML_RESTRICT x = vx;
+     const block_q8_K * GGML_RESTRICT y = vy;
+
+     const int nb = n / QK_K;
+
+ #if defined(__POWER9_VECTOR__)
+     const vector signed char lowMask = vec_splats((signed char)0x3);
+     const vector signed char lowScaleMask = vec_splats((signed char)0xF);
+     const vector int v0 = vec_splats((int32_t)0);
+     const vector unsigned char v2 = vec_splats((unsigned char)0x2);
+     const vector unsigned char v6 = vec_splats((unsigned char)0x6);
+     const vector unsigned char v4 = vec_splats((unsigned char)0x4);
+
+     vector float vsumf0 = vec_splats(0.0f);
+     vector float vsumf1 = vec_splats(0.0f);
+     vector float vsumf2 = vec_splats(0.0f);
+     vector float vsumf3 = vec_splats(0.0f);
+
+     for (int i = 0; i < nb; ++i) {
+         vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d));
+         vector float vyd = vec_splats(y[i].d);
+         vector float vd = vec_mul(vxd, vyd);
+
+         vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].dmin));
+         vector float vdmin = vec_mul(vxmin, vyd);
+
+         vector signed short q8ysums0 = vec_xl( 0, y[i].bsums);
+         vector signed short q8ysums1 = vec_xl(16, y[i].bsums);
+
+         vector signed char q2xmins = (vector signed char)vec_xl( 0, x[i].scales);
+         vector signed char vscales = vec_and(q2xmins, lowScaleMask);
+
+         q2xmins = vec_sr(q2xmins, v4);
+         vector signed short q2xmins0 = vec_unpackh(q2xmins);
+         vector signed short q2xmins1 = vec_unpackl(q2xmins);
+
+         vector signed int prod0 = vec_mule(q2xmins0, q8ysums0);
+         vector signed int prod1 = vec_mulo(q2xmins0, q8ysums0);
+         vector signed int prod2 = vec_mule(q2xmins1, q8ysums1);
+         vector signed int prod3 = vec_mulo(q2xmins1, q8ysums1);
+
+         vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0);
+         vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1);
+         vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2);
+         vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3);
+
+         vector signed int vsumi0 = v0;
+         vector signed int vsumi1 = v0;
+         vector signed int vsumi2 = v0;
+         vector signed int vsumi3 = v0;
+         vector signed int vsumi4 = v0;
+         vector signed int vsumi5 = v0;
+         vector signed int vsumi6 = v0;
+         vector signed int vsumi7 = v0;
+
+         const uint8_t * GGML_RESTRICT q2 = x[i].qs;
+         const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+         for (int j = 0; j < QK_K/128; ++j) {
+             __builtin_prefetch(q2, 0, 1);
+             __builtin_prefetch(q8, 0, 1);
+
+             vector signed char qxs0 = (vector signed char)vec_xl( 0, q2);
+             vector signed char qxs1 = (vector signed char)vec_xl(16, q2);
+             q2 += 32;
+
+             vector unsigned char q2x00 = (vector unsigned char)vec_and(qxs0, lowMask);
+             vector unsigned char q2x01 = (vector unsigned char)vec_and(vec_sr(qxs0, v2), lowMask);
+             vector unsigned char q2x02 = (vector unsigned char)vec_and(vec_sr(qxs0, v4), lowMask);
+             vector unsigned char q2x03 = (vector unsigned char)vec_and(vec_sr(qxs0, v6), lowMask);
+             vector unsigned char q2x10 = (vector unsigned char)vec_and(qxs1, lowMask);
+             vector unsigned char q2x11 = (vector unsigned char)vec_and(vec_sr(qxs1, v2), lowMask);
+             vector unsigned char q2x12 = (vector unsigned char)vec_and(vec_sr(qxs1, v4), lowMask);
+             vector unsigned char q2x13 = (vector unsigned char)vec_and(vec_sr(qxs1, v6), lowMask);
+
+             vector signed char q8y00 = vec_xl(  0, q8);
+             vector signed char q8y10 = vec_xl( 16, q8);
+             vector signed char q8y01 = vec_xl( 32, q8);
+             vector signed char q8y11 = vec_xl( 48, q8);
+             vector signed char q8y02 = vec_xl( 64, q8);
+             vector signed char q8y12 = vec_xl( 80, q8);
+             vector signed char q8y03 = vec_xl( 96, q8);
+             vector signed char q8y13 = vec_xl(112, q8);
+             q8 += 128;
+
+             vector signed int qv0 = vec_msum(q8y00, q2x00, v0);
+             vector signed int qv1 = vec_msum(q8y01, q2x01, v0);
+             vector signed int qv2 = vec_msum(q8y02, q2x02, v0);
+             vector signed int qv3 = vec_msum(q8y03, q2x03, v0);
+             vector signed int qv4 = vec_msum(q8y10, q2x10, v0);
+             vector signed int qv5 = vec_msum(q8y11, q2x11, v0);
+             vector signed int qv6 = vec_msum(q8y12, q2x12, v0);
+             vector signed int qv7 = vec_msum(q8y13, q2x13, v0);
+
+             vector signed short vscales_07 = vec_unpackh(vscales);
+             vector signed int vscales_03 = vec_unpackh(vscales_07);
+             vector signed int vscales_47 = vec_unpackl(vscales_07);
+             vector signed int vs0 = vec_splat(vscales_03, 0);
+             vector signed int vs1 = vec_splat(vscales_03, 1);
+             vector signed int vs2 = vec_splat(vscales_03, 2);
+             vector signed int vs3 = vec_splat(vscales_03, 3);
+             vector signed int vs4 = vec_splat(vscales_47, 0);
+             vector signed int vs5 = vec_splat(vscales_47, 1);
+             vector signed int vs6 = vec_splat(vscales_47, 2);
+             vector signed int vs7 = vec_splat(vscales_47, 3);
+             vscales = vec_sld(vscales, vscales, 8);
+
+             vsumi0 = vec_add(vec_mul(qv0, vs0), vsumi0);
+             vsumi1 = vec_add(vec_mul(qv1, vs2), vsumi1);
+             vsumi2 = vec_add(vec_mul(qv2, vs4), vsumi2);
+             vsumi3 = vec_add(vec_mul(qv3, vs6), vsumi3);
+             vsumi4 = vec_add(vec_mul(qv4, vs1), vsumi4);
+             vsumi5 = vec_add(vec_mul(qv5, vs3), vsumi5);
+             vsumi6 = vec_add(vec_mul(qv6, vs5), vsumi6);
+             vsumi7 = vec_add(vec_mul(qv7, vs7), vsumi7);
+         }
+
+         vsumi0 = vec_add(vsumi0, vsumi4);
+         vsumi1 = vec_add(vsumi1, vsumi5);
+         vsumi2 = vec_add(vsumi2, vsumi6);
+         vsumi3 = vec_add(vsumi3, vsumi7);
+
+         vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+         vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
+         vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
+         vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
+     }
+
+     vsumf0 = vec_add(vsumf0, vsumf2);
+     vsumf1 = vec_add(vsumf1, vsumf3);
+
+     vsumf0 = vec_add(vsumf0, vsumf1);
+
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+     *s = vec_extract(vsumf0, 0);
+
+ #else
+
+     float sumf = 0;
+
+     for (int i = 0; i < nb; ++i) {
+
+         const uint8_t * q2 = x[i].qs;
+         const  int8_t * q8 = y[i].qs;
+         const uint8_t * sc = x[i].scales;
+
+         int summs = 0;
+         for (int j = 0; j < 16; ++j) {
+             summs += y[i].bsums[j] * (sc[j] >> 4);
+         }
+
+         const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
+         const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
+
+         int isum = 0;
+         int is = 0;
+         int d;
+         for (int k = 0; k < QK_K/128; ++k) {
+             int shift = 0;
+             for (int j = 0; j < 4; ++j) {
+                 d = sc[is++] & 0xF;
+                 int isuml = 0;
+                 for (int l =  0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
+                 isum += d * isuml;
+                 d = sc[is++] & 0xF;
+                 isuml = 0;
+                 for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
+                 isum += d * isuml;
+                 shift += 2;
+                 q8 += 32;
+             }
+             q2 += 32;
+         }
+         sumf += dall * isum - dmin * summs;
+     }
+     *s = sumf;
+ #endif
+ }
+
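For q2_K every 256-weight superblock carries 16 packed scale bytes: the low nibble of sc[j] scales the j-th group of 16 two-bit weights and the high nibble is that group's minimum. Because block_q8_K precomputes bsums (the sum of each run of 16 q8 values), the minimum side collapses to one multiply per group, which is the summs loop in the scalar tail; the caller then combines dall*isum - dmin*summs. A sketch of that term (illustrative, name ours):

    #include <stdint.h>

    // Minimum-offset term of one q2_K superblock.
    static int q2_K_summs_sketch(const uint8_t sc[16], const int16_t bsums[16]) {
        int summs = 0;
        for (int j = 0; j < 16; ++j) {
            summs += bsums[j] * (sc[j] >> 4);
        }
        return summs;
    }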
+ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+     assert(n % QK_K == 0);
+     assert(nrc == 1);
+     UNUSED(nrc);
+     UNUSED(bx);
+     UNUSED(by);
+     UNUSED(bs);
+
+     const uint32_t kmask1 = 0x03030303;
+     const uint32_t kmask2 = 0x0f0f0f0f;
+
+     const block_q3_K * GGML_RESTRICT x = vx;
+     const block_q8_K * GGML_RESTRICT y = vy;
+
+     const int nb = n / QK_K;
+
+ #if defined(__POWER9_VECTOR__)
+     const vector signed char lowMask = vec_splats((signed char)0x3);
+     const vector signed char lowMask1 = vec_splats((int8_t)0xf);
+     const vector signed char lowMask2 = vec_splats((int8_t)0x30);
+     const vector int v0 = vec_splats((int32_t)0);
+     const vector signed char v1 = vec_splats((signed char)0x1);
+     const vector unsigned char v2 = vec_splats((unsigned char)0x2);
+     const vector unsigned char v3 = vec_splats((unsigned char)0x3);
+     const vector unsigned char v4 = vec_splats((unsigned char)0x4);
+     const vector unsigned char v6 = vec_splats((unsigned char)0x6);
+     const vector signed char off = vec_splats((signed char)0x20);
+
+     vector float vsumf0 = vec_splats(0.0f);
+     vector float vsumf1 = vec_splats(0.0f);
+     vector float vsumf2 = vec_splats(0.0f);
+     vector float vsumf3 = vec_splats(0.0f);
+
+     for (int i = 0; i < nb; ++i) {
+         vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d));
+         vector float vyd = vec_splats(y[i].d);
+         vector float vd = vec_mul(vxd, vyd);
+
+         UNUSED(kmask1);
+         UNUSED(kmask2);
+
+         vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8);
+         vector signed char u1 = vec_and(u0, lowMask1);
+         vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4);
+         vector signed char u3 = (vector signed char)vec_mergeh((vector signed int)u2, (vector signed int)vec_sr(u2, v2));
+         vector signed char u30 = vec_sl(vec_and(u3, lowMask), v4);
+         vector signed char u31 = vec_and(u3, lowMask2);
+
+         u1 = vec_or(u1, u30);
+         u2 = vec_or(vec_sr(u0, v4), u31);
+
+         vector signed char vscales = (vector signed char)vec_mergeh((vector signed long long)u1, (vector signed long long)u2);
+         vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].hmask);
+         vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].hmask);
+
+         vscales = vec_sub(vscales, off);
+
+         vector signed int vsumi0 = v0;
+         vector signed int vsumi1 = v0;
+         vector signed int vsumi2 = v0;
+         vector signed int vsumi3 = v0;
+         vector signed int vsumi4 = v0;
+         vector signed int vsumi5 = v0;
+         vector signed int vsumi6 = v0;
+         vector signed int vsumi7 = v0;
+
+         const uint8_t * GGML_RESTRICT q3 = x[i].qs;
+         const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+         for (int j = 0; j < QK_K/128; ++j) {
+             __builtin_prefetch(q3, 0, 1);
+             __builtin_prefetch(q8, 0, 1);
+
+             vector signed char qxs0 = (vector signed char)vec_xl( 0, q3);
+             vector signed char qxs1 = (vector signed char)vec_xl(16, q3);
+             q3 += 32;
+
+             // the low 2 bits
+             vector signed char qxs00 = vec_and(qxs0, lowMask);
+             vector signed char qxs01 = vec_and(vec_sr(qxs0, v2), lowMask);
+             vector signed char qxs02 = vec_and(vec_sr(qxs0, v4), lowMask);
+             vector signed char qxs03 = vec_and(vec_sr(qxs0, v6), lowMask);
+             vector signed char qxs10 = vec_and(qxs1, lowMask);
+             vector signed char qxs11 = vec_and(vec_sr(qxs1, v2), lowMask);
+             vector signed char qxs12 = vec_and(vec_sr(qxs1, v4), lowMask);
+             vector signed char qxs13 = vec_and(vec_sr(qxs1, v6), lowMask);
+
+             // the 3rd bit
+             vector signed char qxh00 = vec_sl(vec_andc(v1, qxhs0), v2);
+             vector signed char qxh01 = vec_sl(vec_andc(v1, vec_sr(qxhs0, (vector unsigned char)v1)), v2);
+             vector signed char qxh02 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v2)), v2);
+             vector signed char qxh03 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v3)), v2);
+             vector signed char qxh10 = vec_sl(vec_andc(v1, qxhs1), v2);
+             vector signed char qxh11 = vec_sl(vec_andc(v1, vec_sr(qxhs1, (vector unsigned char)v1)), v2);
+             vector signed char qxh12 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v2)), v2);
+             vector signed char qxh13 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v3)), v2);
+             qxhs0 = vec_sr(qxhs0, v4);
+             qxhs1 = vec_sr(qxhs1, v4);
+
+             vector signed char q3x00 = vec_sub(qxs00, qxh00);
+             vector signed char q3x01 = vec_sub(qxs01, qxh01);
+             vector signed char q3x02 = vec_sub(qxs02, qxh02);
+             vector signed char q3x03 = vec_sub(qxs03, qxh03);
+             vector signed char q3x10 = vec_sub(qxs10, qxh10);
+             vector signed char q3x11 = vec_sub(qxs11, qxh11);
+             vector signed char q3x12 = vec_sub(qxs12, qxh12);
+             vector signed char q3x13 = vec_sub(qxs13, qxh13);
+
+             vector signed char q8y00 = vec_xl(  0, q8);
+             vector signed char q8y10 = vec_xl( 16, q8);
+             vector signed char q8y01 = vec_xl( 32, q8);
+             vector signed char q8y11 = vec_xl( 48, q8);
+             vector signed char q8y02 = vec_xl( 64, q8);
+             vector signed char q8y12 = vec_xl( 80, q8);
+             vector signed char q8y03 = vec_xl( 96, q8);
+             vector signed char q8y13 = vec_xl(112, q8);
+             q8 += 128;
+
+             vector signed short vscales_h = vec_unpackh(vscales);
+             vector signed short vs0 = vec_splat(vscales_h, 0);
+             vector signed short vs1 = vec_splat(vscales_h, 1);
+             vector signed short vs2 = vec_splat(vscales_h, 2);
+             vector signed short vs3 = vec_splat(vscales_h, 3);
+             vector signed short vs4 = vec_splat(vscales_h, 4);
+             vector signed short vs5 = vec_splat(vscales_h, 5);
+             vector signed short vs6 = vec_splat(vscales_h, 6);
+             vector signed short vs7 = vec_splat(vscales_h, 7);
+             vscales = vec_sld(vscales, vscales, 8);
+
+             vector signed short qv00 = vec_add(vec_mule(q3x00, q8y00), vec_mulo(q3x00, q8y00));
+             vector signed short qv01 = vec_add(vec_mule(q3x01, q8y01), vec_mulo(q3x01, q8y01));
+             vector signed short qv02 = vec_add(vec_mule(q3x02, q8y02), vec_mulo(q3x02, q8y02));
+             vector signed short qv03 = vec_add(vec_mule(q3x03, q8y03), vec_mulo(q3x03, q8y03));
+             vector signed short qv10 = vec_add(vec_mule(q3x10, q8y10), vec_mulo(q3x10, q8y10));
+             vector signed short qv11 = vec_add(vec_mule(q3x11, q8y11), vec_mulo(q3x11, q8y11));
+             vector signed short qv12 = vec_add(vec_mule(q3x12, q8y12), vec_mulo(q3x12, q8y12));
+             vector signed short qv13 = vec_add(vec_mule(q3x13, q8y13), vec_mulo(q3x13, q8y13));
+
+             vsumi0 = vec_msum(qv00, vs0, vsumi0);
+             vsumi1 = vec_msum(qv01, vs2, vsumi1);
+             vsumi2 = vec_msum(qv02, vs4, vsumi2);
+             vsumi3 = vec_msum(qv03, vs6, vsumi3);
+             vsumi4 = vec_msum(qv10, vs1, vsumi4);
+             vsumi5 = vec_msum(qv11, vs3, vsumi5);
+             vsumi6 = vec_msum(qv12, vs5, vsumi6);
+             vsumi7 = vec_msum(qv13, vs7, vsumi7);
+         }
+
+         vsumi0 = vec_add(vsumi0, vsumi4);
+         vsumi1 = vec_add(vsumi1, vsumi5);
+         vsumi2 = vec_add(vsumi2, vsumi6);
+         vsumi3 = vec_add(vsumi3, vsumi7);
+
+         vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+         vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
+         vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
+         vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
+     }
+
+     vsumf0 = vec_add(vsumf0, vsumf2);
+     vsumf1 = vec_add(vsumf1, vsumf3);
+
+     vsumf0 = vec_add(vsumf0, vsumf1);
+
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+     *s = vec_extract(vsumf0, 0);
+
+ #else
+     // scalar version
+     // This function is written like this so the compiler can manage to vectorize most of it
+     // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the
+     // manually vectorized version above. Every other version I tried would run at least 4 times slower.
+     // The ideal situation would be if we could just write the code once, and the compiler would
+     // automatically produce the best possible set of machine instructions, instead of us having to manually
+     // write vectorized versions for AVX, ARM_NEON, etc.
+
+     int8_t aux8[QK_K];
+     int16_t aux16[8];
+     float sums [8];
+     int32_t aux32[8];
+     memset(sums, 0, 8*sizeof(float));
+
+     uint32_t auxs[4];
+     const int8_t * scales = (const int8_t*)auxs;
+
+     float sumf = 0;
+     for (int i = 0; i < nb; ++i) {
+         const uint8_t * GGML_RESTRICT q3 = x[i].qs;
+         const uint8_t * GGML_RESTRICT hm = x[i].hmask;
+         const  int8_t * GGML_RESTRICT q8 = y[i].qs;
+         memset(aux32, 0, 8*sizeof(int32_t));
+         int8_t * GGML_RESTRICT a = aux8;
+         uint8_t m = 1;
+         for (int j = 0; j < QK_K; j += 128) {
+             for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
+             for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
+             a += 32; m <<= 1;
+             for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
+             for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
+             a += 32; m <<= 1;
+             for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
+             for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
+             a += 32; m <<= 1;
+             for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
+             for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
+             a += 32; m <<= 1;
+             q3 += 32;
+         }
+         a = aux8;
+
+         memcpy(auxs, x[i].scales, 12);
+         uint32_t tmp = auxs[2];
+         auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
+         auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
+         auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
+         auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
+         for (int j = 0; j < QK_K/16; ++j) {
+             for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+             for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
+             q8 += 8; a += 8;
+             for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+             for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
+             q8 += 8; a += 8;
+         }
+         const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+         for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+     }
+     for (int l = 0; l < 8; ++l) sumf += sums[l];
+     *s = sumf;
+
+ #endif
+
+ }
+
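In q3_K the two low bits of each weight come from qs while hmask supplies the third bit, stored inverted: a cleared mask bit means subtract 4, mapping the stored 0..3 into the signed range -4..3 (the vec_andc/vec_sub pair in the vector path, the hm[l] & m test in the scalar one). One weight in isolation (a sketch, name ours):

    #include <stdint.h>

    // two_bits in 0..3 from qs; hmask_bit_set taken from x[i].hmask.
    static int8_t q3_K_weight_sketch(uint8_t two_bits, int hmask_bit_set) {
        return (int8_t)(two_bits - (hmask_bit_set ? 0 : 4));
    }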
+ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+     assert(n % QK_K == 0);
+     assert(nrc == 1);
+     UNUSED(nrc);
+     UNUSED(bx);
+     UNUSED(by);
+     UNUSED(bs);
+
+     const block_q4_K * GGML_RESTRICT x = vx;
+     const block_q8_K * GGML_RESTRICT y = vy;
+
+     const int nb = n / QK_K;
+
+     static const uint32_t kmask1 = 0x3f3f3f3f;
+     static const uint32_t kmask2 = 0x0f0f0f0f;
+     static const uint32_t kmask3 = 0x03030303;
+
+     uint32_t utmp[4];
+
+ #if defined(__POWER9_VECTOR__)
+     const vector signed char lowMask = vec_splats((signed char)0xF);
+     const vector signed char lowMask1 = vec_splats((int8_t)0x3f);
+     const vector signed char lowMask2 = vec_splats((int8_t)0x30);
+     const vector int v0 = vec_splats((int32_t)0);
+     const vector unsigned char v2 = vec_splats((uint8_t)2);
+     const vector unsigned char v4 = vec_splats((unsigned char)0x4);
+
+     vector float vsumf0 = vec_splats(0.0f);
+     vector float vsumf1 = vec_splats(0.0f);
+     vector float vsumf2 = vec_splats(0.0f);
+     vector float vsumf3 = vec_splats(0.0f);
+
+     for (int i = 0; i < nb; ++i) {
+         vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d));
+         vector float vyd = vec_splats(y[i].d);
+         vector float vd = vec_mul(vxd, vyd);
+
+         vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].dmin));
+         vector float vdmin = vec_mul(vxmin, vyd);
+
+         vector signed short q8ysums0 = vec_xl( 0, y[i].bsums);
+         vector signed short q8ysums1 = vec_xl(16, y[i].bsums);
+
+         UNUSED(kmask1);
+         UNUSED(kmask2);
+         UNUSED(kmask3);
+         UNUSED(utmp);
+
+         vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8);
+         vector signed char u1 = vec_and(vec_sr(u0, v2), lowMask2);
+         vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4);
+         vector signed char u3 = vec_sr(u2, v4);
+
+         vector signed char u30 = u1;
+         vector signed char u31 = (vector signed char)vec_mergeh((vector signed int)vec_and(u2, lowMask), (vector signed int)u3);
+
+         u1 = vec_and(u0, lowMask1);
+         u2 = vec_or(u30, u31);
+
+         vector signed char utmps = (vector signed char)vec_mergeh((vector signed int)u1, (vector signed int)u2);
+
+         vector signed short vscales = vec_unpackh(utmps);
+         vector signed short q4xmins = vec_unpackl(utmps);
+         vector signed short q4xmins0 = vec_mergeh(q4xmins, q4xmins);
+         vector signed short q4xmins1 = vec_mergel(q4xmins, q4xmins);
+
+         vector signed int prod0 = vec_mule(q4xmins0, q8ysums0);
+         vector signed int prod1 = vec_mule(q4xmins1, q8ysums1);
+         vector signed int prod2 = vec_mulo(q4xmins0, q8ysums0);
+         vector signed int prod3 = vec_mulo(q4xmins1, q8ysums1);
+
+         vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0);
+         vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1);
+         vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2);
+         vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3);
+
+         vector signed int vsumi0 = v0;
+         vector signed int vsumi1 = v0;
+         vector signed int vsumi2 = v0;
+         vector signed int vsumi3 = v0;
+
+         const uint8_t * GGML_RESTRICT q4 = x[i].qs;
+         const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+         for (int j = 0; j < QK_K/64; j+=2) {
+             __builtin_prefetch(q4, 0, 1);
+             __builtin_prefetch(q8, 0, 1);
+
+             vector signed char qxs0 = (vector signed char)vec_xl( 0, q4);
+             vector signed char qxs1 = (vector signed char)vec_xl(16, q4);
+             vector signed char qxs2 = (vector signed char)vec_xl(32, q4);
+             vector signed char qxs3 = (vector signed char)vec_xl(48, q4);
+             q4 += 64;
+
+             vector unsigned char q4x00 = (vector unsigned char)vec_and(qxs0, lowMask);
+             vector unsigned char q4x01 = (vector unsigned char)vec_sr(qxs0, v4);
+             vector unsigned char q4x10 = (vector unsigned char)vec_and(qxs1, lowMask);
+             vector unsigned char q4x11 = (vector unsigned char)vec_sr(qxs1, v4);
+             vector unsigned char q4x20 = (vector unsigned char)vec_and(qxs2, lowMask);
+             vector unsigned char q4x21 = (vector unsigned char)vec_sr(qxs2, v4);
+             vector unsigned char q4x30 = (vector unsigned char)vec_and(qxs3, lowMask);
+             vector unsigned char q4x31 = (vector unsigned char)vec_sr(qxs3, v4);
+
+             vector signed char q8y00 = vec_xl(  0, q8);
+             vector signed char q8y10 = vec_xl( 16, q8);
+             vector signed char q8y01 = vec_xl( 32, q8);
+             vector signed char q8y11 = vec_xl( 48, q8);
+             vector signed char q8y20 = vec_xl( 64, q8);
+             vector signed char q8y30 = vec_xl( 80, q8);
+             vector signed char q8y21 = vec_xl( 96, q8);
+             vector signed char q8y31 = vec_xl(112, q8);
+             q8 += 128;
+
+             vector signed int qv00 = vec_msum(q8y00, q4x00, v0);
+             vector signed int qv01 = vec_msum(q8y01, q4x01, v0);
+             vector signed int qv10 = vec_msum(q8y10, q4x10, v0);
+             vector signed int qv11 = vec_msum(q8y11, q4x11, v0);
+             vector signed int qv20 = vec_msum(q8y20, q4x20, v0);
+             vector signed int qv21 = vec_msum(q8y21, q4x21, v0);
+             vector signed int qv30 = vec_msum(q8y30, q4x30, v0);
+             vector signed int qv31 = vec_msum(q8y31, q4x31, v0);
+
+             vector signed int vscales_h = vec_unpackh(vscales);
+             vector signed int vs0 = vec_splat(vscales_h, 0);
+             vector signed int vs1 = vec_splat(vscales_h, 1);
+             vector signed int vs2 = vec_splat(vscales_h, 2);
+             vector signed int vs3 = vec_splat(vscales_h, 3);
+             vscales = vec_sld(vscales, vscales, 8);
+
+             vsumi0 = vec_add(vec_mul(qv00, vs0), vsumi0);
+             vsumi1 = vec_add(vec_mul(qv01, vs1), vsumi1);
+             vsumi2 = vec_add(vec_mul(qv20, vs2), vsumi2);
+             vsumi3 = vec_add(vec_mul(qv21, vs3), vsumi3);
+
+             vsumi0 = vec_add(vec_mul(qv10, vs0), vsumi0);
+             vsumi1 = vec_add(vec_mul(qv11, vs1), vsumi1);
+             vsumi2 = vec_add(vec_mul(qv30, vs2), vsumi2);
+             vsumi3 = vec_add(vec_mul(qv31, vs3), vsumi3);
+         }
+
+         vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+         vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
+         vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
+         vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
+     }
+
+     vsumf0 = vec_add(vsumf0, vsumf2);
+     vsumf1 = vec_add(vsumf1, vsumf3);
+
+     vsumf0 = vec_add(vsumf0, vsumf1);
+
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+     *s = vec_extract(vsumf0, 0);
+
+ #else
+
+     const uint8_t * scales = (const uint8_t*)&utmp[0];
+     const uint8_t * mins   = (const uint8_t*)&utmp[2];
+
+     int8_t aux8[QK_K];
+     int16_t aux16[8];
+     float sums [8];
+     int32_t aux32[8];
+     memset(sums, 0, 8*sizeof(float));
+
+     float sumf = 0;
+     for (int i = 0; i < nb; ++i) {
+         const uint8_t * GGML_RESTRICT q4 = x[i].qs;
+         const  int8_t * GGML_RESTRICT q8 = y[i].qs;
+         memset(aux32, 0, 8*sizeof(int32_t));
+         int8_t * GGML_RESTRICT a = aux8;
+         for (int j = 0; j < QK_K/64; ++j) {
+             for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
+             a += 32;
+             for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
+             a += 32; q4 += 32;
+         }
+         memcpy(utmp, x[i].scales, 12);
+         utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+         const uint32_t uaux = utmp[1] & kmask1;
+         utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+         utmp[2] = uaux;
+         utmp[0] &= kmask1;
+
+         int sumi = 0;
+         for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
+         a = aux8;
+         int is = 0;
+         for (int j = 0; j < QK_K/32; ++j) {
+             int32_t scale = scales[is++];
+             for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+             for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+             q8 += 8; a += 8;
+             for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+             for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+             q8 += 8; a += 8;
+             for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+             for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+             q8 += 8; a += 8;
+             for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+             for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+             q8 += 8; a += 8;
+         }
+         const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+         for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+         const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
+         sumf -= dmin * sumi;
+     }
+     for (int l = 0; l < 8; ++l) sumf += sums[l];
+     *s = sumf;
+ #endif
+ }
+
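The utmp shuffle with kmask1/2/3 in the scalar branch unpacks the 12-byte scales field into eight 6-bit scales and eight 6-bit minimums. The same layout can also be read one sub-block at a time; a sketch of the equivalent extraction, written here only for illustration (the packaged code does the whole shuffle with three masks instead):

    #include <stdint.h>

    // 6-bit scale d and minimum m of sub-block j (0 <= j < 8)
    // from the 12-byte q4_K/q5_K scales field.
    static void scale_min_k4_sketch(int j, const uint8_t q[12],
                                    uint8_t * d, uint8_t * m) {
        if (j < 4) {
            *d = q[j] & 63;
            *m = q[j + 4] & 63;
        } else {
            *d = (q[j + 4] & 0x0F) | ((q[j - 4] >> 6) << 4);
            *m = (q[j + 4] >>   4) | ((q[j - 0] >> 6) << 4);
        }
    }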
+ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+     assert(n % QK_K == 0);
+     assert(nrc == 1);
+     UNUSED(nrc);
+     UNUSED(bx);
+     UNUSED(by);
+     UNUSED(bs);
+
+     const block_q5_K * GGML_RESTRICT x = vx;
+     const block_q8_K * GGML_RESTRICT y = vy;
+
+     const int nb = n / QK_K;
+
+     static const uint32_t kmask1 = 0x3f3f3f3f;
+     static const uint32_t kmask2 = 0x0f0f0f0f;
+     static const uint32_t kmask3 = 0x03030303;
+
+     uint32_t utmp[4];
+
+ #if defined(__POWER9_VECTOR__)
+     const vector signed char lowMask = vec_splats((signed char)0xF);
+     const vector signed char lowMask1 = vec_splats((int8_t)0x3f);
+     const vector signed char lowMask2 = vec_splats((int8_t)0x30);
+     const vector int v0 = vec_splats((int32_t)0);
+     const vector unsigned char v1 = vec_splats((unsigned char)0x1);
+     const vector unsigned char v2 = vec_splats((unsigned char)0x2);
+     const vector unsigned char v3 = vec_splats((unsigned char)0x3);
+     const vector unsigned char v4 = vec_splats((unsigned char)0x4);
+
+     vector float vsumf0 = vec_splats(0.0f);
+     vector float vsumf1 = vec_splats(0.0f);
+     vector float vsumf2 = vec_splats(0.0f);
+     vector float vsumf3 = vec_splats(0.0f);
+
+     for (int i = 0; i < nb; ++i) {
+         vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d));
+         vector float vyd = vec_splats(y[i].d);
+         vector float vd = vec_mul(vxd, vyd);
+
+         vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].dmin));
+         vector float vdmin = vec_mul(vxmin, vyd);
+
+         UNUSED(kmask1);
+         UNUSED(kmask2);
+         UNUSED(kmask3);
+         UNUSED(utmp);
+
+         vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8);
+         vector signed char u1 = vec_and(vec_sr(u0, v2), lowMask2);
+         vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4);
+         vector signed char u3 = vec_sr(u2, v4);
+
+         vector signed char u30 = u1;
+         vector signed char u31 = (vector signed char)vec_mergeh((vector signed int)vec_and(u2, lowMask), (vector signed int)u3);
+
+         u1 = vec_and(u0, lowMask1);
+         u2 = vec_or(u30, u31);
+
+         vector signed char utmps = (vector signed char)vec_mergeh((vector signed int)u1, (vector signed int)u2);
+
+         vector signed short q8ysums0 = vec_xl( 0, y[i].bsums);
+         vector signed short q8ysums1 = vec_xl(16, y[i].bsums);
+
+         vector signed short vscales = vec_unpackh(utmps);
+
+         vector signed short q5xmins = vec_unpackl(utmps);
+         vector signed short q5xmins0 = vec_mergeh(q5xmins, q5xmins);
+         vector signed short q5xmins1 = vec_mergel(q5xmins, q5xmins);
+
+         vector signed int prod0 = vec_mule(q5xmins0, q8ysums0);
+         vector signed int prod1 = vec_mule(q5xmins1, q8ysums1);
+         vector signed int prod2 = vec_mulo(q5xmins0, q8ysums0);
+         vector signed int prod3 = vec_mulo(q5xmins1, q8ysums1);
+
+         vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0);
+         vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1);
+         vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2);
+         vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3);
+
+         vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].qh);
+         vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].qh);
+
+         vector signed int vsumi0 = v0;
+         vector signed int vsumi1 = v0;
+         vector signed int vsumi2 = v0;
+         vector signed int vsumi3 = v0;
+
+         const uint8_t * GGML_RESTRICT q5 = x[i].qs;
+         const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+         for (int j = 0; j < QK_K/64; ++j) {
+             __builtin_prefetch(q5, 0, 1);
+             __builtin_prefetch(q8, 0, 1);
+
+             vector signed char qxs0 = (vector signed char)vec_xl( 0, q5);
+             vector signed char qxs1 = (vector signed char)vec_xl(16, q5);
+             q5 += 32;
+
+             vector signed char qxs00 = vec_and(qxs0, lowMask);
+             vector signed char qxs01 = vec_sr(qxs0, v4);
+             vector signed char qxs10 = vec_and(qxs1, lowMask);
+             vector signed char qxs11 = vec_sr(qxs1, v4);
+
+             vector signed char q5h00 = vec_sl(vec_and((vector signed char)v1, qxhs0), v4);
+             vector signed char q5h01 = vec_sl(vec_and((vector signed char)v2, qxhs0), v3);
+             vector signed char q5h10 = vec_sl(vec_and((vector signed char)v1, qxhs1), v4);
+             vector signed char q5h11 = vec_sl(vec_and((vector signed char)v2, qxhs1), v3);
+             qxhs0 = vec_sr(qxhs0, v2);
+             qxhs1 = vec_sr(qxhs1, v2);
+
+             vector unsigned char q5x00 = (vector unsigned char)vec_or(q5h00, qxs00);
+             vector unsigned char q5x01 = (vector unsigned char)vec_or(q5h01, qxs01);
+             vector unsigned char q5x10 = (vector unsigned char)vec_or(q5h10, qxs10);
+             vector unsigned char q5x11 = (vector unsigned char)vec_or(q5h11, qxs11);
+
+             vector signed char q8y00 = vec_xl( 0, q8);
+             vector signed char q8y10 = vec_xl(16, q8);
+             vector signed char q8y01 = vec_xl(32, q8);
+             vector signed char q8y11 = vec_xl(48, q8);
+             q8 += 64;
+
+             vector signed int qv00 = vec_msum(q8y00, q5x00, v0);
+             vector signed int qv01 = vec_msum(q8y01, q5x01, v0);
+             vector signed int qv10 = vec_msum(q8y10, q5x10, v0);
+             vector signed int qv11 = vec_msum(q8y11, q5x11, v0);
+
+             vector signed int vscales_h = vec_unpackh(vscales);
+             vector signed int vs0 = vec_splat(vscales_h, 0);
+             vector signed int vs1 = vec_splat(vscales_h, 1);
+             vscales = vec_sld(vscales, vscales, 12);
+
+             vsumi0 = vec_add(vec_mul(qv00, vs0), vsumi0);
+             vsumi1 = vec_add(vec_mul(qv10, vs0), vsumi1);
+             vsumi2 = vec_add(vec_mul(qv01, vs1), vsumi2);
+             vsumi3 = vec_add(vec_mul(qv11, vs1), vsumi3);
+         }
+
+         vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+         vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
+         vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
+         vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
+     }
+
+     vsumf0 = vec_add(vsumf0, vsumf2);
+     vsumf1 = vec_add(vsumf1, vsumf3);
+
+     vsumf0 = vec_add(vsumf0, vsumf1);
+
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+     vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+     *s = vec_extract(vsumf0, 0);
+
+ #else
+
+     const uint8_t * scales = (const uint8_t*)&utmp[0];
+     const uint8_t * mins   = (const uint8_t*)&utmp[2];
+
+     int8_t aux8[QK_K];
+     int16_t aux16[8];
+     float sums [8];
+     int32_t aux32[8];
+     memset(sums, 0, 8*sizeof(float));
+
+     float sumf = 0;
+     for (int i = 0; i < nb; ++i) {
+         const uint8_t * GGML_RESTRICT q4 = x[i].qs;
+         const uint8_t * GGML_RESTRICT hm = x[i].qh;
+         const  int8_t * GGML_RESTRICT q8 = y[i].qs;
+         memset(aux32, 0, 8*sizeof(int32_t));
+         int8_t * GGML_RESTRICT a = aux8;
+         uint8_t m = 1;
+         for (int j = 0; j < QK_K/64; ++j) {
+             for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
+             for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
+             a += 32; m <<= 1;
+             for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
+             for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
+             a += 32; m <<= 1;
+             q4 += 32;
+         }
+         memcpy(utmp, x[i].scales, 12);
+         utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+         const uint32_t uaux = utmp[1] & kmask1;
+         utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+         utmp[2] = uaux;
+         utmp[0] &= kmask1;
+
+         int sumi = 0;
+         for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
+         a = aux8;
+         int is = 0;
+         for (int j = 0; j < QK_K/32; ++j) {
+             int32_t scale = scales[is++];
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
1386
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
1387
+ q8 += 8; a += 8;
1388
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
1389
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
1390
+ q8 += 8; a += 8;
1391
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
1392
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
1393
+ q8 += 8; a += 8;
1394
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
1395
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
1396
+ q8 += 8; a += 8;
1397
+ }
1398
+ const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
1399
+ for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
1400
+ const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
1401
+ sumf -= dmin * sumi;
1402
+ }
1403
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
1404
+ *s = sumf;
1405
+ #endif
1406
+ }
1407
+
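+ // Q6_K x Q8_K dot product. Each 6-bit weight is split across two arrays:
+ // the low 4 bits in ql and the high 2 bits in qh, with signed 8-bit
+ // sub-block scales and a shared fp16 super-block scale. Reconstruction
+ // (see the scalar #else branch below) is essentially:
+ //   q = ((ql & 0xF) | (((qh >> shift) & 3) << 4)) - 32;
+ // The POWER9 path performs the same unpacking with vector masks/shifts
+ // and accumulates the 16-bit products into eight int32 accumulators.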
+ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_q6_K * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+ #if defined(__POWER9_VECTOR__)
+ const vector signed char lowMask = vec_splats((signed char)0xF);
+ const vector int v0 = vec_splats((int32_t)0);
+ const vector unsigned char v2 = vec_splats((unsigned char)0x2);
+ const vector unsigned char v3 = vec_splats((unsigned char)0x3);
+ const vector unsigned char v4 = vec_splats((unsigned char)0x4);
+ const vector unsigned char v6 = vec_splats((unsigned char)0x6);
+ const vector signed char off = vec_splats((signed char)0x20);
+
+ vector float vsumf0 = vec_splats(0.0f);
+ vector float vsumf1 = vec_splats(0.0f);
+ vector float vsumf2 = vec_splats(0.0f);
+ vector float vsumf3 = vec_splats(0.0f);
+
+ for (int i = 0; i < nb; ++i) {
+ vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d));
+ vector float vyd = vec_splats(y[i].d);
+ vector float vd = vec_mul(vxd, vyd);
+
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
+ vector signed int vsumi4 = v0;
+ vector signed int vsumi5 = v0;
+ vector signed int vsumi6 = v0;
+ vector signed int vsumi7 = v0;
+
+ const uint8_t * GGML_RESTRICT q6 = x[i].ql;
+ const uint8_t * GGML_RESTRICT qh = x[i].qh;
+ const int8_t * GGML_RESTRICT qs = x[i].scales;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+ __builtin_prefetch(q6, 0, 0);
+ __builtin_prefetch(qh, 0, 0);
+ __builtin_prefetch(q8, 0, 0);
+
+ vector signed char qxs0 = (vector signed char)vec_xl( 0, q6);
+ vector signed char qxs1 = (vector signed char)vec_xl(16, q6);
+ vector signed char qxs2 = (vector signed char)vec_xl(32, q6);
+ vector signed char qxs3 = (vector signed char)vec_xl(48, q6);
+ q6 += 64;
+
+ vector signed char qxs00 = vec_and(qxs0, lowMask);
+ vector signed char qxs01 = vec_sr(qxs0, v4);
+ vector signed char qxs10 = vec_and(qxs1, lowMask);
+ vector signed char qxs11 = vec_sr(qxs1, v4);
+ vector signed char qxs20 = vec_and(qxs2, lowMask);
+ vector signed char qxs21 = vec_sr(qxs2, v4);
+ vector signed char qxs30 = vec_and(qxs3, lowMask);
+ vector signed char qxs31 = vec_sr(qxs3, v4);
+
+ vector signed char qxhs0 = (vector signed char)vec_xl( 0, qh);
+ vector signed char qxhs1 = (vector signed char)vec_xl(16, qh);
+ qh += 32;
+
+ vector signed char qxh00 = vec_sl(vec_and((vector signed char)v3, qxhs0), v4);
+ vector signed char qxh01 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v4)), v4);
+ vector signed char qxh10 = vec_sl(vec_and((vector signed char)v3, qxhs1), v4);
+ vector signed char qxh11 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v4)), v4);
+ vector signed char qxh20 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v2)), v4);
+ vector signed char qxh21 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v6)), v4);
+ vector signed char qxh30 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v2)), v4);
+ vector signed char qxh31 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v6)), v4);
+
+ vector signed char q6x00 = vec_sub(vec_or(qxh00, qxs00), off);
+ vector signed char q6x01 = vec_sub(vec_or(qxh01, qxs01), off);
+ vector signed char q6x10 = vec_sub(vec_or(qxh10, qxs10), off);
+ vector signed char q6x11 = vec_sub(vec_or(qxh11, qxs11), off);
+ vector signed char q6x20 = vec_sub(vec_or(qxh20, qxs20), off);
+ vector signed char q6x21 = vec_sub(vec_or(qxh21, qxs21), off);
+ vector signed char q6x30 = vec_sub(vec_or(qxh30, qxs30), off);
+ vector signed char q6x31 = vec_sub(vec_or(qxh31, qxs31), off);
+
+ vector signed char q8y00 = vec_xl( 0, q8);
+ vector signed char q8y10 = vec_xl( 16, q8);
+ vector signed char q8y20 = vec_xl( 32, q8);
+ vector signed char q8y30 = vec_xl( 48, q8);
+ vector signed char q8y01 = vec_xl( 64, q8);
+ vector signed char q8y11 = vec_xl( 80, q8);
+ vector signed char q8y21 = vec_xl( 96, q8);
+ vector signed char q8y31 = vec_xl(112, q8);
+ q8 += 128;
+
+ vector signed short qv00 = vec_add(vec_mule(q6x00, q8y00), vec_mulo(q6x00, q8y00));
+ vector signed short qv10 = vec_add(vec_mule(q6x10, q8y10), vec_mulo(q6x10, q8y10));
+ vector signed short qv20 = vec_add(vec_mule(q6x20, q8y20), vec_mulo(q6x20, q8y20));
+ vector signed short qv30 = vec_add(vec_mule(q6x30, q8y30), vec_mulo(q6x30, q8y30));
+ vector signed short qv01 = vec_add(vec_mule(q6x01, q8y01), vec_mulo(q6x01, q8y01));
+ vector signed short qv11 = vec_add(vec_mule(q6x11, q8y11), vec_mulo(q6x11, q8y11));
+ vector signed short qv21 = vec_add(vec_mule(q6x21, q8y21), vec_mulo(q6x21, q8y21));
+ vector signed short qv31 = vec_add(vec_mule(q6x31, q8y31), vec_mulo(q6x31, q8y31));
+
+ vector signed short vscales = vec_unpackh(vec_xl_len(qs, 8));
+ qs += 8;
+
+ vector signed short vs0 = vec_splat(vscales, 0);
+ vector signed short vs1 = vec_splat(vscales, 1);
+ vector signed short vs2 = vec_splat(vscales, 2);
+ vector signed short vs3 = vec_splat(vscales, 3);
+ vector signed short vs4 = vec_splat(vscales, 4);
+ vector signed short vs5 = vec_splat(vscales, 5);
+ vector signed short vs6 = vec_splat(vscales, 6);
+ vector signed short vs7 = vec_splat(vscales, 7);
+
+ vsumi0 = vec_msum(qv00, vs0, vsumi0);
+ vsumi1 = vec_msum(qv01, vs4, vsumi1);
+ vsumi2 = vec_msum(qv10, vs1, vsumi2);
+ vsumi3 = vec_msum(qv11, vs5, vsumi3);
+ vsumi4 = vec_msum(qv20, vs2, vsumi4);
+ vsumi5 = vec_msum(qv21, vs6, vsumi5);
+ vsumi6 = vec_msum(qv30, vs3, vsumi6);
+ vsumi7 = vec_msum(qv31, vs7, vsumi7);
+ }
+
+ vsumi0 = vec_add(vsumi0, vsumi4);
+ vsumi1 = vec_add(vsumi1, vsumi5);
+ vsumi2 = vec_add(vsumi2, vsumi6);
+ vsumi3 = vec_add(vsumi3, vsumi7);
+
+ vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+ vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
+ vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
+ vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
+ }
+
+ vsumf0 = vec_add(vsumf0, vsumf2);
+ vsumf1 = vec_add(vsumf1, vsumf3);
+
+ vsumf0 = vec_add(vsumf0, vsumf1);
+
+ vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+ vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+ *s = vec_extract(vsumf0, 0);
+
+ #else
+
+ int8_t aux8[QK_K];
+ int16_t aux16[8];
+ float sums [8];
+ int32_t aux32[8];
+ memset(sums, 0, 8*sizeof(float));
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * GGML_RESTRICT q4 = x[i].ql;
+ const uint8_t * GGML_RESTRICT qh = x[i].qh;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+ memset(aux32, 0, 8*sizeof(int32_t));
+ int8_t * GGML_RESTRICT a = aux8;
+ for (int j = 0; j < QK_K; j += 128) {
+ for (int l = 0; l < 32; ++l) {
+ a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
+ a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
+ a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
+ a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
+ }
+ a += 128;
+ q4 += 64;
+ qh += 32;
+ }
+ a = aux8;
+ int is = 0;
+ for (int j = 0; j < QK_K/16; ++j) {
+ int scale = x[i].scales[is++];
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ }
+ const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+ for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+ #endif
+ }
+
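+ // Sign-expansion table shared by the iq2/iq3 kernels below: 1024 int8
+ // values, read through a uint64 pointer as 128 eight-byte entries of
+ // +1/-1. The 7-bit index is a sign pattern whose eighth sign is implied
+ // by even parity (hence "keven"), so one vec_mul applies all eight signs.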
+ #if defined (__POWER9_VECTOR__)
+ static const int8_t keven_signs_q2xs[1024] = {
+ 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1,
+ 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1,
+ 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1,
+ 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1,
+ 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1,
+ 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1,
+ 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1,
+ 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1,
+ 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1,
+ 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1,
+ 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1,
+ 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1,
+ 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1,
+ 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1,
+ 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1,
+ 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1,
+ 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1,
+ 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1,
+ 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1,
+ 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1,
+ 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1,
+ 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1,
+ 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1,
+ 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1,
+ 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1,
+ 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
+ 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,
+ 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1,
+ 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1,
+ 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1,
+ 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,
+ 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1,
+ };
+ #endif
+
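+ // IQ2_XXS x Q8_K dot product (codebook quantization at ~2.06 bpw).
+ // Each byte of the block metadata indexes an 8-byte codeword in
+ // iq2xxs_grid; sign patterns come from keven_signs_q2xs above and the
+ // 4-bit block scale from the top bits of the metadata word. Both paths
+ // fold the format's constant 1/8 factor into the result (0.125f * ...).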
+ void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_iq2_xxs * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+ #if defined(__POWER9_VECTOR__)
+ const vector int v0 = vec_splats((int32_t)0);
+ vector float vsumf0 = vec_splats(0.0f);
+ vector float vsumf1 = vec_splats(0.0f);
+ vector float vsumf2 = vec_splats(0.0f);
+ vector float vsumf3 = vec_splats(0.0f);
+
+ const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
+
+ for (int i = 0; i < nb; ++i) {
+ vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d));
+ vector float vyd = vec_splats(y[i].d);
+ vector float vd = vec_mul(vxd, vyd);
+
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
+
+ const uint16_t * GGML_RESTRICT q2 = x[i].qs;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+ for (int j = 0; j < QK_K/32; j += 2) {
+ __builtin_prefetch(q2, 0, 1);
+ __builtin_prefetch(q8, 0, 1);
+
+ uint32_t aux32[4];
+ const uint8_t * aux8 = (const uint8_t *)aux32;
+
+ memcpy(aux32, q2, 4*sizeof(uint32_t));
+ q2 += 8;
+
+ vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1])};
+ vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3])};
+ vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9])};
+ vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11])};
+
+ vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127))};
+ vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127))};
+ vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127))};
+ vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127))};
+
+ vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0);
+ vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1);
+ vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2);
+ vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3);
+
+ vector signed char q8y0 = vec_xl( 0, q8);
+ vector signed char q8y1 = vec_xl(16, q8);
+ vector signed char q8y2 = vec_xl(32, q8);
+ vector signed char q8y3 = vec_xl(48, q8);
+ q8 += 64;
+
+ vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0));
+ vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1));
+ vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2));
+ vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3));
+
+ const uint16_t ls0 = aux32[1] >> 28;
+ const uint16_t ls1 = aux32[3] >> 28;
+
+ vector signed short vscales01 = vec_splats((int16_t)(2*ls0+1));
+ vector signed short vscales23 = vec_splats((int16_t)(2*ls1+1));
+
+ vsumi0 = vec_msum(qv0, vscales01, vsumi0);
+ vsumi1 = vec_msum(qv1, vscales01, vsumi1);
+ vsumi2 = vec_msum(qv2, vscales23, vsumi2);
+ vsumi3 = vec_msum(qv3, vscales23, vsumi3);
+ }
+
+ vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+ vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
+ vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
+ vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
+ }
+
+ vsumf0 = vec_add(vsumf0, vsumf2);
+ vsumf1 = vec_add(vsumf1, vsumf3);
+
+ vsumf0 = vec_add(vsumf0, vsumf1);
+
+ vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+ vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+ *s = 0.125f * vec_extract(vsumf0, 0);
+
+ #else
+
+ uint32_t aux32[2];
+ const uint8_t * aux8 = (const uint8_t *)aux32;
+
+ float sumf = 0.f;
+ for (int i = 0; i < nb; ++i) {
+ const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+ const uint16_t * GGML_RESTRICT q2 = x[i].qs;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+ int32_t bsum = 0;
+ for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
+ memcpy(aux32, q2, 2*sizeof(uint32_t));
+ q2 += 4;
+ const uint32_t ls = 2*(aux32[1] >> 28) + 1;
+ int32_t sumi = 0;
+ for (int l = 0; l < 4; ++l) {
+ const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
+ const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
+ for (int j = 0; j < 8; ++j) {
+ sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
+ }
+ q8 += 8;
+ }
+ bsum += sumi * ls;
+ }
+ sumf += d * bsum;
+ }
+ *s = 0.125f * sumf;
+ #endif
+ }
+
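+ // IQ2_XS variant: same scheme as IQ2_XXS, but codewords are addressed
+ // by 9-bit indices (q2[l] & 511) into the larger iq2xs_grid, the 7-bit
+ // sign pattern rides in the remaining bits (q2[l] >> 9), and the 4-bit
+ // scales live in a separate x[i].scales array. The 0.125f factor still
+ // applies at the end.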
+ void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_iq2_xs * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+ #if defined(__POWER9_VECTOR__)
+ const vector int v0 = vec_splats((int32_t)0);
+ vector float vsumf0 = vec_splats(0.0f);
+ vector float vsumf1 = vec_splats(0.0f);
+ vector float vsumf2 = vec_splats(0.0f);
+ vector float vsumf3 = vec_splats(0.0f);
+
+ const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
+
+ for (int i = 0; i < nb; ++i) {
+ vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d));
+ vector float vyd = vec_splats(y[i].d);
+ vector float vd = vec_mul(vxd, vyd);
+
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
+
+ const uint16_t * GGML_RESTRICT q2 = x[i].qs;
+ const uint8_t * GGML_RESTRICT sc = x[i].scales;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+ for (int j = 0; j < QK_K/64; ++j) {
+ __builtin_prefetch(q2, 0, 1);
+ __builtin_prefetch(q8, 0, 1);
+
+ vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xs_grid + (q2[0] & 511)), *(const int64_t *)(iq2xs_grid + (q2[1] & 511))};
+ vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xs_grid + (q2[2] & 511)), *(const int64_t *)(iq2xs_grid + (q2[3] & 511))};
+ vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xs_grid + (q2[4] & 511)), *(const int64_t *)(iq2xs_grid + (q2[5] & 511))};
+ vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xs_grid + (q2[6] & 511)), *(const int64_t *)(iq2xs_grid + (q2[7] & 511))};
+
+ vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((q2[0] >> 9))), *(const int64_t *)(signs64 + ((q2[1] >> 9)))};
+ vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((q2[2] >> 9))), *(const int64_t *)(signs64 + ((q2[3] >> 9)))};
+ vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((q2[4] >> 9))), *(const int64_t *)(signs64 + ((q2[5] >> 9)))};
+ vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((q2[6] >> 9))), *(const int64_t *)(signs64 + ((q2[7] >> 9)))};
+ q2 += 8;
+
+ vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0);
+ vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1);
+ vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2);
+ vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3);
+
+ vector signed char q8y0 = vec_xl( 0, q8);
+ vector signed char q8y1 = vec_xl(16, q8);
+ vector signed char q8y2 = vec_xl(32, q8);
+ vector signed char q8y3 = vec_xl(48, q8);
+ q8 += 64;
+
+ vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0));
+ vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1));
+ vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2));
+ vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3));
+
+ const uint16_t ls0 = (uint16_t)(sc[0] & 0xf);
+ const uint16_t ls1 = (uint16_t)(sc[0] >> 4);
+ const uint16_t ls2 = (uint16_t)(sc[1] & 0xf);
+ const uint16_t ls3 = (uint16_t)(sc[1] >> 4);
+ sc += 2;
+
+ vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1));
+ vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1));
+ vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1));
+ vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1));
+
+ vsumi0 = vec_msum(qv0, vscales0, vsumi0);
+ vsumi1 = vec_msum(qv1, vscales1, vsumi1);
+ vsumi2 = vec_msum(qv2, vscales2, vsumi2);
+ vsumi3 = vec_msum(qv3, vscales3, vsumi3);
+ }
+
+ vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+ vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
+ vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
+ vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
+ }
+
+ vsumf0 = vec_add(vsumf0, vsumf2);
+ vsumf1 = vec_add(vsumf1, vsumf3);
+
+ vsumf0 = vec_add(vsumf0, vsumf1);
+
+ vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+ vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+ *s = 0.125f * vec_extract(vsumf0, 0);
+
+ #else
+
+ float sumf = 0.f;
+ for (int i = 0; i < nb; ++i) {
+ const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+ const uint16_t * GGML_RESTRICT q2 = x[i].qs;
+ const uint8_t * GGML_RESTRICT sc = x[i].scales;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+ int32_t bsum = 0;
+ for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
+ const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1;
+ const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1;
+ int32_t sumi = 0;
+ for (int l = 0; l < 2; ++l) {
+ const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
+ const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
+ for (int j = 0; j < 8; ++j) {
+ sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
+ }
+ q8 += 8;
+ }
+ bsum += sumi * ls1;
+ sumi = 0;
+ for (int l = 2; l < 4; ++l) {
+ const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
+ const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
+ for (int j = 0; j < 8; ++j) {
+ sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
+ }
+ q8 += 8;
+ }
+ bsum += sumi * ls2;
+ q2 += 4;
+ }
+ sumf += d * bsum;
+ }
+ *s = 0.125f * sumf;
+ #endif
+ }
+
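+ // IQ2_S variant: codeword indices combine 8 bits from qs with 2 high
+ // bits from qh, and signs are stored explicitly (one byte per 8 values)
+ // instead of through the parity table. The vector path expands each
+ // sign bit to an all-ones mask with vec_cmpeq, then negates via the
+ // xor/sub idiom.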
+ void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_iq2_s * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+ #if defined(__POWER9_VECTOR__)
+ static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
+ };
+
+ static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,};
+
+ const vector int v0 = vec_splats((int32_t)0);
+
+ vector float vsumf0 = vec_splats(0.0f);
+ vector float vsumf1 = vec_splats(0.0f);
+ vector float vsumf2 = vec_splats(0.0f);
+ vector float vsumf3 = vec_splats(0.0f);
+
+ const vector unsigned char mask0 = vec_xl( 0, k_mask1);
+ const vector unsigned char mask1 = vec_xl(16, k_mask1);
+ const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2);
+
+ for (int i = 0; i < nb; ++i) {
+ vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d));
+ vector float vyd = vec_splats(y[i].d);
+ vector float vd = vec_mul(vxd, vyd);
+
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
+
+ const uint8_t * GGML_RESTRICT q2 = x[i].qs;
+ const uint8_t * GGML_RESTRICT qh = x[i].qh;
+ const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8);
+ const uint8_t * GGML_RESTRICT sc = x[i].scales;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+ for (int j = 0; j < QK_K/32; j += 2) {
+ __builtin_prefetch(q2, 0, 1);
+ __builtin_prefetch(q8, 0, 1);
+
+ vector signed long long aux64x2_0 = {*(const int64_t *)(iq2s_grid + (q2[0] | ((qh[0] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[1] | ((qh[0] << 6) & 0x300)))};
+ vector signed long long aux64x2_1 = {*(const int64_t *)(iq2s_grid + (q2[2] | ((qh[0] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[3] | ((qh[0] << 2) & 0x300)))};
+ vector signed long long aux64x2_2 = {*(const int64_t *)(iq2s_grid + (q2[4] | ((qh[1] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[5] | ((qh[1] << 6) & 0x300)))};
+ vector signed long long aux64x2_3 = {*(const int64_t *)(iq2s_grid + (q2[6] | ((qh[1] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[7] | ((qh[1] << 2) & 0x300)))};
+ q2 += 8;
+ qh += 2;
+
+ vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]);
+ vector signed char vsigns23 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]);
+ signs += 4;
+
+ vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0);
+ vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1);
+ vector signed char vsigns2 = vec_perm(vsigns23, vsigns23, mask0);
+ vector signed char vsigns3 = vec_perm(vsigns23, vsigns23, mask1);
+
+ vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2);
+ vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2);
+ vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2);
+ vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2);
+
+ vector signed char q2x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux64x2_0), vsigns0);
+ vector signed char q2x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux64x2_1), vsigns1);
+ vector signed char q2x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux64x2_2), vsigns2);
+ vector signed char q2x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux64x2_3), vsigns3);
+
+ vector signed char q8y0 = vec_xl( 0, q8);
+ vector signed char q8y1 = vec_xl(16, q8);
+ vector signed char q8y2 = vec_xl(32, q8);
+ vector signed char q8y3 = vec_xl(48, q8);
+ q8 += 64;
+
+ vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0));
+ vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1));
+ vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2));
+ vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3));
+
+ const uint16_t ls0 = (uint16_t)(sc[0] & 0xf);
+ const uint16_t ls1 = (uint16_t)(sc[0] >> 4);
+ const uint16_t ls2 = (uint16_t)(sc[1] & 0xf);
+ const uint16_t ls3 = (uint16_t)(sc[1] >> 4);
+ sc += 2;
+
+ vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1));
+ vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1));
+ vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1));
+ vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1));
+
+ vsumi0 = vec_msum(qv0, vscales0, vsumi0);
+ vsumi1 = vec_msum(qv1, vscales1, vsumi1);
+ vsumi2 = vec_msum(qv2, vscales2, vsumi2);
+ vsumi3 = vec_msum(qv3, vscales3, vsumi3);
+ }
+
+ vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+ vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
+ vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
+ vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
+ }
+
+ vsumf0 = vec_add(vsumf0, vsumf2);
+ vsumf1 = vec_add(vsumf1, vsumf3);
+
+ vsumf0 = vec_add(vsumf0, vsumf1);
+
+ vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+ vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+ *s = 0.125f * vec_extract(vsumf0, 0);
+
+ #else
+
+ float sumf = 0;
+ for (int i = 0; i < nb; i++) {
+
+ const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+ const int8_t * q8 = y[i].qs;
+ const uint8_t * qs = x[i].qs;
+ const uint8_t * qh = x[i].qh;
+ const uint8_t * signs = qs + QK_K/8;
+
+ int bsum = 0;
+ for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
+ int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf);
+ int ls2 = 1 + 2*(x[i].scales[ib32] >> 4);
+ int sumi1 = 0, sumi2 = 0;
+ for (int l = 0; l < 2; ++l) {
+ const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
+ for (int j = 0; j < 8; ++j) {
+ sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
+ }
+ q8 += 8;
+ }
+ for (int l = 2; l < 4; ++l) {
+ const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
+ for (int j = 0; j < 8; ++j) {
+ sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
+ }
+ q8 += 8;
+ }
+ bsum += ls1 * sumi1 + ls2 * sumi2;
+ qs += 4;
+ signs += 4;
+ }
+
+ sumf += d * bsum;
+ }
+
+ *s = 0.125f * sumf;
+
+ #endif
+
+ }
+
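+ // IQ3_XXS x Q8_K dot product: each byte of qs indexes a 4-byte entry of
+ // iq3xxs_grid, and the trailing QK_K/4 bytes of qs pack the sign
+ // patterns together with the 4-bit block scales (the ">> 28" extracts).
+ // Here the format's constant factor is 1/4 (0.25f * ...).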
+ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_iq3_xxs * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+ #if defined(__POWER9_VECTOR__)
+ const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
+
+ const vector int v0 = vec_splats((int32_t)0);
+
+ vector float vsumf0 = vec_splats(0.0f);
+ vector float vsumf1 = vec_splats(0.0f);
+ vector float vsumf2 = vec_splats(0.0f);
+ vector float vsumf3 = vec_splats(0.0f);
+
+ for (int i = 0; i < nb; ++i) {
+ vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d));
+ vector float vyd = vec_splats(y[i].d);
+ vector float vd = vec_mul(vxd, vyd);
+
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
+
+ const uint8_t * GGML_RESTRICT q3 = x[i].qs;
+ const uint32_t * GGML_RESTRICT signs = (const uint32_t *)(x[i].qs + QK_K/4);
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+ #pragma GCC unroll 1
+ for (int j = 0; j < QK_K/32; j += 2) {
+ __builtin_prefetch(q3, 0, 1);
+ __builtin_prefetch(q8, 0, 1);
+
+ vector unsigned int aux32x4_0 = {iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]};
+ vector unsigned int aux32x4_1 = {iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]};
+ vector unsigned int aux32x4_2 = {iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]};
+ vector unsigned int aux32x4_3 = {iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]};
+ q3 += 16;
+
+ vector unsigned long long aux64x2_0 = {(uint64_t)(signs64[(signs[0] >> 0) & 127]), (uint64_t)(signs64[(signs[0] >> 7) & 127])};
+ vector unsigned long long aux64x2_1 = {(uint64_t)(signs64[(signs[0] >> 14) & 127]), (uint64_t)(signs64[(signs[0] >> 21) & 127])};
+ vector unsigned long long aux64x2_2 = {(uint64_t)(signs64[(signs[1] >> 0) & 127]), (uint64_t)(signs64[(signs[1] >> 7) & 127])};
+ vector unsigned long long aux64x2_3 = {(uint64_t)(signs64[(signs[1] >> 14) & 127]), (uint64_t)(signs64[(signs[1] >> 21) & 127])};
+
+ vector signed char q3x0 = vec_mul((vector signed char)aux64x2_0, (vector signed char)aux32x4_0);
+ vector signed char q3x1 = vec_mul((vector signed char)aux64x2_1, (vector signed char)aux32x4_1);
+ vector signed char q3x2 = vec_mul((vector signed char)aux64x2_2, (vector signed char)aux32x4_2);
+ vector signed char q3x3 = vec_mul((vector signed char)aux64x2_3, (vector signed char)aux32x4_3);
+
+ vector signed char q8y0 = vec_xl( 0, q8);
+ vector signed char q8y1 = vec_xl(16, q8);
+ vector signed char q8y2 = vec_xl(32, q8);
+ vector signed char q8y3 = vec_xl(48, q8);
+ q8 += 64;
+
+ vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0));
+ vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1));
+ vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2));
+ vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3));
+
+ const uint16_t ls0 = (uint16_t)(signs[0] >> 28);
+ const uint16_t ls1 = (uint16_t)(signs[1] >> 28);
+ signs += 2;
+
+ vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1));
+ vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1));
+
+ vsumi0 = vec_msum(qv0, vscales01, vsumi0);
+ vsumi1 = vec_msum(qv1, vscales01, vsumi1);
+ vsumi2 = vec_msum(qv2, vscales23, vsumi2);
+ vsumi3 = vec_msum(qv3, vscales23, vsumi3);
+ }
+
+ vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+ vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
+ vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
+ vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
+ }
+
+ vsumf0 = vec_add(vsumf0, vsumf2);
+ vsumf1 = vec_add(vsumf1, vsumf3);
+
+ vsumf0 = vec_add(vsumf0, vsumf1);
+
+ vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+ vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+ *s = 0.25f * vec_extract(vsumf0, 0);
+
+ #else
+
+ uint32_t aux32;
+
+ float sumf = 0.f;
+ for (int i = 0; i < nb; ++i) {
+ const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+ const uint8_t * GGML_RESTRICT q3 = x[i].qs;
+ const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+ int32_t bsum = 0;
+ for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
+ memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t);
+ const uint32_t ls = 2*(aux32 >> 28) + 1;
+ int32_t sumi = 0;
+ for (int l = 0; l < 4; ++l) {
+ const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]);
+ const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]);
+ const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
+ for (int j = 0; j < 4; ++j) {
+ sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1);
+ sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1);
+ }
+ q8 += 8;
+ }
+ q3 += 8;
+ bsum += sumi * ls;
+ }
+ sumf += d * bsum;
+ }
+ *s = 0.25f * sumf;
+ #endif
+ }
+
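+ // IQ3_S variant: grid indices are 9 bits (8 from qs plus one high bit
+ // from qh), signs are stored explicitly in x[i].signs, and two 4-bit
+ // scales are packed per byte of x[i].scales. No trailing constant
+ // factor is needed for this format.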
+ void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_iq3_s * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+ #if defined(__POWER9_VECTOR__)
+ static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
+ };
+
+ static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,};
+
+ const vector int v0 = vec_splats((int32_t)0);
+
+ vector float vsumf0 = vec_splats(0.0f);
+ vector float vsumf1 = vec_splats(0.0f);
+ vector float vsumf2 = vec_splats(0.0f);
+ vector float vsumf3 = vec_splats(0.0f);
+
+ const vector unsigned char mask0 = vec_xl( 0, k_mask1);
+ const vector unsigned char mask1 = vec_xl(16, k_mask1);
+ const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2);
+
+ for (int i = 0; i < nb; ++i) {
+ vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d));
+ vector float vyd = vec_splats(y[i].d);
+ vector float vd = vec_mul(vxd, vyd);
+
+ const uint8_t * GGML_RESTRICT q3 = x[i].qs;
+ const uint8_t * GGML_RESTRICT qh = x[i].qh;
+ const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].signs);
+ const uint8_t * GGML_RESTRICT sc = x[i].scales;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+ vector signed int vsumi0 = v0;
+ vector signed int vsumi1 = v0;
+ vector signed int vsumi2 = v0;
+ vector signed int vsumi3 = v0;
+
+ for (int j = 0; j < QK_K/32; j += 2) {
+ __builtin_prefetch(q3, 0, 1);
+ __builtin_prefetch(q8, 0, 1);
+
+ vector unsigned int aux32x4_0 = {iq3s_grid[q3[ 0] | ((qh[0] << 8) & 256)], iq3s_grid[q3[ 1] | ((qh[0] << 7) & 256)],
+ iq3s_grid[q3[ 2] | ((qh[0] << 6) & 256)], iq3s_grid[q3[ 3] | ((qh[0] << 5) & 256)]};
+ vector unsigned int aux32x4_1 = {iq3s_grid[q3[ 4] | ((qh[0] << 4) & 256)], iq3s_grid[q3[ 5] | ((qh[0] << 3) & 256)],
+ iq3s_grid[q3[ 6] | ((qh[0] << 2) & 256)], iq3s_grid[q3[ 7] | ((qh[0] << 1) & 256)]};
+ vector unsigned int aux32x4_2 = {iq3s_grid[q3[ 8] | ((qh[1] << 8) & 256)], iq3s_grid[q3[ 9] | ((qh[1] << 7) & 256)],
+ iq3s_grid[q3[10] | ((qh[1] << 6) & 256)], iq3s_grid[q3[11] | ((qh[1] << 5) & 256)]};
+ vector unsigned int aux32x4_3 = {iq3s_grid[q3[12] | ((qh[1] << 4) & 256)], iq3s_grid[q3[13] | ((qh[1] << 3) & 256)],
+ iq3s_grid[q3[14] | ((qh[1] << 2) & 256)], iq3s_grid[q3[15] | ((qh[1] << 1) & 256)]};
+ q3 += 16;
+ qh += 2;
+
+ vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]);
+ vector signed char vsigns02 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]);
+ signs += 4;
+
+ vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0);
+ vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1);
+ vector signed char vsigns2 = vec_perm(vsigns02, vsigns02, mask0);
+ vector signed char vsigns3 = vec_perm(vsigns02, vsigns02, mask1);
+
+ vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2);
+ vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2);
+ vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2);
+ vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2);
+
+ vector signed char q3x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux32x4_0), vsigns0);
+ vector signed char q3x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux32x4_1), vsigns1);
+ vector signed char q3x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux32x4_2), vsigns2);
+ vector signed char q3x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux32x4_3), vsigns3);
+
+ vector signed char q8y0 = vec_xl( 0, q8);
+ vector signed char q8y1 = vec_xl(16, q8);
+ vector signed char q8y2 = vec_xl(32, q8);
+ vector signed char q8y3 = vec_xl(48, q8);
+ q8 += 64;
+
+ vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0));
+ vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1));
+ vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2));
+ vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3));
+
+ const uint16_t ls0 = (uint16_t)(sc[0] & 0xf);
+ const uint16_t ls1 = (uint16_t)(sc[0] >> 4);
+ sc ++;
+
+ vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1));
+ vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1));
+
+ vsumi0 = vec_msum(qv0, vscales01, vsumi0);
+ vsumi1 = vec_msum(qv1, vscales01, vsumi1);
+ vsumi2 = vec_msum(qv2, vscales23, vsumi2);
+ vsumi3 = vec_msum(qv3, vscales23, vsumi3);
+ }
+
+ vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+ vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
+ vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
+ vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
+ }
+
+ vsumf0 = vec_add(vsumf0, vsumf2);
+ vsumf1 = vec_add(vsumf1, vsumf3);
+
+ vsumf0 = vec_add(vsumf0, vsumf1);
+
+ vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+ vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+ *s = vec_extract(vsumf0, 0);
+
+ #else
+
+ float sumf = 0.f;
+ for (int i = 0; i < nb; ++i) {
+ const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+ const uint8_t * GGML_RESTRICT qs = x[i].qs;
+ const uint8_t * GGML_RESTRICT qh = x[i].qh;
+ const uint8_t * GGML_RESTRICT signs = x[i].signs;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+ int32_t bsum = 0;
+ for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
+ const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1;
+ const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1;
+ int32_t sumi = 0;
+ for (int l = 0; l < 4; ++l) {
+ const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256)));
+ const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256)));
+ for (int j = 0; j < 4; ++j) {
+ sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
+ sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
+ }
+ q8 += 8;
+ }
+ qs += 8;
+ signs += 4;
+ bsum += sumi * ls1;
+ sumi = 0;
+ for (int l = 0; l < 4; ++l) {
+ const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256)));
+ const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256)));
+ for (int j = 0; j < 4; ++j) {
+ sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
+ sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
+ }
+ q8 += 8;
+ }
+ qs += 8;
+ signs += 4;
+ bsum += sumi * ls2;
+ }
+ sumf += d * bsum;
+ }
+ *s = sumf;
+ #endif
+ }
+
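+ // IQ1_S x Q8_K dot product (~1.56 bpw). Grid indices take 8 bits from
+ // qs plus 3 bits from qh; bits 12-14 of each 16-bit qh word hold the
+ // block scale and bit 15 the sign of the IQ1S_DELTA offset, which is
+ // applied cheaply through the precomputed y[i].bsums (the separate
+ // vsumi8 accumulator in the vector path, sumi1 in the scalar path).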
+ void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_iq1_s * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+ #if defined(__POWER9_VECTOR__)
+ const vector unsigned char v0 = vec_splats((unsigned char)0x0);
+ const vector unsigned short vsign = vec_splats((unsigned short)0x8000);
+
+ vector float vsumf0 = vec_splats(0.0f);
+ vector float vsumf1 = vec_splats(0.0f);
+ vector float vsumf2 = vec_splats(0.0f);
+ vector float vsumf3 = vec_splats(0.0f);
+
+ for (int i = 0; i < nb; ++i) {
+ vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d));
+ vector float vyd = vec_splats(y[i].d);
+ vector float vd = vec_mul(vxd, vyd);
+
+ vector signed int vsumi0 = vec_splats((int32_t)0);
+ vector signed int vsumi1 = vec_splats((int32_t)0);
+ vector signed int vsumi2 = vec_splats((int32_t)0);
+ vector signed int vsumi3 = vec_splats((int32_t)0);
+ vector signed int vsumi8 = vec_splats((int32_t)0);
+
+ const uint8_t * GGML_RESTRICT q1 = x[i].qs;
+ const uint16_t * GGML_RESTRICT qh = x[i].qh;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+ const int16_t * GGML_RESTRICT qs = y[i].bsums;
+
+ for (int j = 0; j < QK_K/32; j += 2) {
+ __builtin_prefetch(q1, 0, 1);
+ __builtin_prefetch(qh, 0, 1);
+ __builtin_prefetch(q8, 0, 1);
+
+ vector signed long long aux64x2_0 = {*(const int64_t *)(iq1s_grid + (q1[0] | ((qh[0] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[1] | ((qh[0] << 5) & 0x700)))};
+ vector signed long long aux64x2_1 = {*(const int64_t *)(iq1s_grid + (q1[2] | ((qh[0] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[3] | ((qh[0] >> 1) & 0x700)))};
+ vector signed long long aux64x2_2 = {*(const int64_t *)(iq1s_grid + (q1[4] | ((qh[1] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[5] | ((qh[1] << 5) & 0x700)))};
+ vector signed long long aux64x2_3 = {*(const int64_t *)(iq1s_grid + (q1[6] | ((qh[1] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[7] | ((qh[1] >> 1) & 0x700)))};
+ q1 += 8;
+
+ vector signed char q1x0 = (vector signed char)aux64x2_0;
+ vector signed char q1x1 = (vector signed char)aux64x2_1;
+ vector signed char q1x2 = (vector signed char)aux64x2_2;
+ vector signed char q1x3 = (vector signed char)aux64x2_3;
+
+ vector signed char q8y0 = vec_xl( 0, q8);
+ vector signed char q8y1 = vec_xl(16, q8);
+ vector signed char q8y2 = vec_xl(32, q8);
+ vector signed char q8y3 = vec_xl(48, q8);
+ q8 += 64;
+
+ vector signed short qv0 = vec_add(vec_mule(q1x0, q8y0), vec_mulo(q1x0, q8y0));
+ vector signed short qv1 = vec_add(vec_mule(q1x1, q8y1), vec_mulo(q1x1, q8y1));
+ vector signed short qv2 = vec_add(vec_mule(q1x2, q8y2), vec_mulo(q1x2, q8y2));
+ vector signed short qv3 = vec_add(vec_mule(q1x3, q8y3), vec_mulo(q1x3, q8y3));
+
+ const uint16_t ls0 = (uint16_t)((qh[0] >> 12) & 7);
+ const uint16_t ls1 = (uint16_t)((qh[1] >> 12) & 7);
+
+ vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1));
+ vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1));
+ vector signed short vscales = vec_sld(vscales23, vscales01, 8);
+
+ vsumi0 = vec_msum(qv0, vscales01, vsumi0);
+ vsumi1 = vec_msum(qv1, vscales01, vsumi1);
+ vsumi2 = vec_msum(qv2, vscales23, vsumi2);
+ vsumi3 = vec_msum(qv3, vscales23, vsumi3);
+
+ vector signed short q8ysums = vec_xl_len(qs, 8);
+ qs += 4;
+ q8ysums = vec_mergeh(q8ysums, (vector signed short)v0);
+
+ vector signed short qxh = (vector signed short)vec_sld(vec_splats(qh[1]), vec_splats(qh[0]), 8);
+ qh += 2;
+ vector __bool short vsel = vec_cmpge(qxh, (vector signed short)v0);
+
+ vector signed short q8ysum = vec_sel((vector signed short)vec_xor((vector unsigned short)q8ysums, vsign), q8ysums, vsel);
+
+ vsumi8 = vec_add(vec_mule(q8ysum, vscales), vsumi8);
+ }
+
+ vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+ vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
+ vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
+ vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
+
+ vsumf0 = vec_madd(vec_ctf(vsumi8, 0), vec_mul(vd, vec_splats(IQ1S_DELTA)), vsumf0);
+ }
+
+ vsumf0 = vec_add(vsumf0, vsumf2);
+ vsumf1 = vec_add(vsumf1, vsumf3);
+
+ vsumf0 = vec_add(vsumf0, vsumf1);
+
+ vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+ vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+ *s = vec_extract(vsumf0, 0);
+
+ #else
+
+ float sumf = 0;
+ for (int i = 0; i < nb; i++) {
+
+ const int8_t * q8 = y[i].qs;
+ const uint8_t * qs = x[i].qs;
+ const uint16_t * qh = x[i].qh;
+
+ int sumi = 0, sumi1 = 0;
+ for (int ib = 0; ib < QK_K/32; ++ib) {
+ const int ls = 2*((qh[ib] >> 12) & 7) + 1;
+ const int delta = qh[ib] & 0x8000 ? -1 : 1;
+ int lsum = 0;
+ for (int l = 0; l < 4; ++l) {
+ const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8)));
+ for (int j = 0; j < 8; ++j) {
+ lsum += q8[j] * grid[j];
+ }
+ q8 += 8;
+ }
+ sumi += ls * lsum;
+ sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]);
+ qs += 4;
+ }
+
+ sumf += GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1);
+ }
+
+ *s = sumf;
+
+ #endif
+ }
+
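+ // IQ4_NL x Q8_0 dot product: 4-bit indices into the non-linear codebook
+ // kvalues_iq4nl, one fp16 scale per 32-value block. The POWER9 path
+ // uses vec_perm as a 16-entry table lookup; since ib is shared, the
+ // scalar loop after the #endif doubles as the full fallback when no
+ // vector path is compiled in.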
2517
+void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+    assert(nrc == 1);
+    UNUSED(nrc);
+    UNUSED(bx);
+    UNUSED(by);
+    UNUSED(bs);
+    assert(n % QK4_NL == 0);
+    static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same");
+
+    const block_iq4_nl * GGML_RESTRICT x = vx;
+    const block_q8_0   * GGML_RESTRICT y = vy;
+
+    const int nb = n / QK4_NL;
+
+    int ib = 0;
+    float sumf = 0;
+
+#if defined(__POWER9_VECTOR__)
+    const vector signed char lowMask = vec_splats((signed char)0xF);
+    const vector signed int v0 = vec_splats((int32_t)0);
+    const vector unsigned char v4 = vec_splats((unsigned char)0x4);
+
+    vector float vsumf0 = vec_splats(0.0f);
+    vector float vsumf1 = vec_splats(0.0f);
+
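+    // load the 16-entry non-linear codebook once; vec_perm below uses the
+    // unpacked 4-bit quants directly as permute indices into it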
+    const vector signed char values = vec_xl( 0, kvalues_iq4nl);
+
+#pragma GCC unroll 4
+    for (; ib < nb; ++ib) {
+        __builtin_prefetch(x[ib].qs, 0, 1);
+        __builtin_prefetch(y[ib].qs, 0, 1);
+
+        vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d));
+        vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d));
+        vector float vd = vec_mul(vxd, vyd);
+
+        vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs);
+        vector signed char q4x0 = vec_and(qxs, lowMask);
+        vector signed char q4x1 = vec_sr(qxs, v4);
+
+        q4x0 = vec_perm(values, values, (vector unsigned char)q4x0);
+        q4x1 = vec_perm(values, values, (vector unsigned char)q4x1);
+
+        vector signed char q8y0 = vec_xl( 0, y[ib].qs);
+        vector signed char q8y1 = vec_xl(16, y[ib].qs);
+
+        vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0));
+        vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1));
+
+        vector signed int vsumi0 = v0;
+        vector signed int vsumi1 = v0;
+
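+        // sum adjacent 16-bit products into the 32-bit accumulator lanes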
+        vsumi0 = vec_sum4s(qv0, vsumi0);
+        vsumi1 = vec_sum4s(qv1, vsumi1);
+
+        vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+        vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
+    }
+
+    vsumf0 = vec_add(vsumf0, vsumf1);
+
+    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+    sumf = vec_extract(vsumf0, 0);
+
+#endif
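+    // scalar tail; this is also the full path when POWER9 vectors are
+    // unavailable, since ib then starts at 0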
+    for (; ib < nb; ++ib) {
+        const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d);
+        int sumi1 = 0, sumi2 = 0;
+        for (int j = 0; j < QK4_NL/2; ++j) {
+            sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf];
+            sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4];
+        }
+        sumf += d * (sumi1 + sumi2);
+    }
+    *s = sumf;
+}
+
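+// IQ4_XS x Q8_K dot product: same codebook as IQ4_NL, plus per-sub-block
+// 6-bit scales split between scales_l (low 4 bits) and scales_h (high 2 bits),
+// stored with a bias of 32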
+void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+    assert(nrc == 1);
+    UNUSED(nrc);
+    UNUSED(bx);
+    UNUSED(by);
+    UNUSED(bs);
+    assert(n % QK_K == 0);
+
+    const block_iq4_xs * GGML_RESTRICT x = vx;
+    const block_q8_K   * GGML_RESTRICT y = vy;
+
+    const int nb = n / QK_K;
+
+#if defined(__POWER9_VECTOR__)
+    const vector signed char lowMask = vec_splats((signed char)0xF);
+    const vector int v0 = vec_splats((int32_t)0);
+    const vector unsigned char v4 = vec_splats((unsigned char)0x4);
+
+    vector float vsumf0 = vec_splats(0.0f);
+    vector float vsumf1 = vec_splats(0.0f);
+    vector float vsumf2 = vec_splats(0.0f);
+    vector float vsumf3 = vec_splats(0.0f);
+
+    const vector signed char values = vec_xl( 0, kvalues_iq4nl);
+
+    for (int ibl = 0; ibl < nb; ++ibl) {
+
+        vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ibl].d));
+        vector float vyd = vec_splats(y[ibl].d);
+        vector float vd = vec_mul(vxd, vyd);
+
+        vector signed int vsumi0 = v0;
+        vector signed int vsumi1 = v0;
+        vector signed int vsumi2 = v0;
+        vector signed int vsumi3 = v0;
+
+        uint16_t h = x[ibl].scales_h;
+
+        const uint8_t * GGML_RESTRICT q4 = x[ibl].qs;
+        const uint8_t * GGML_RESTRICT sc = x[ibl].scales_l;
+        const int8_t  * GGML_RESTRICT q8 = y[ibl].qs;
+
+        for (int ib = 0; ib < QK_K/64; ib++) {
+            __builtin_prefetch(q4, 0, 1);
+            __builtin_prefetch(q8, 0, 1);
+
+            vector signed char qxs0 = (vector signed char)vec_xl( 0, q4);
+            vector signed char qxs1 = (vector signed char)vec_xl(16, q4);
+            q4 += 32;
+
+            vector signed char q4x00 = (vector signed char)vec_and(qxs0, lowMask);
+            vector signed char q4x01 = (vector signed char)vec_sr(qxs0, v4);
+            vector signed char q4x10 = (vector signed char)vec_and(qxs1, lowMask);
+            vector signed char q4x11 = (vector signed char)vec_sr(qxs1, v4);
+
+            q4x00 = vec_perm(values, values, (vector unsigned char)q4x00);
+            q4x01 = vec_perm(values, values, (vector unsigned char)q4x01);
+            q4x10 = vec_perm(values, values, (vector unsigned char)q4x10);
+            q4x11 = vec_perm(values, values, (vector unsigned char)q4x11);
+
+            vector signed char q8y0 = vec_xl( 0, q8);
+            vector signed char q8y1 = vec_xl(16, q8);
+            vector signed char q8y2 = vec_xl(32, q8);
+            vector signed char q8y3 = vec_xl(48, q8);
+            q8 += 64;
+
+            vector signed short qv0 = vec_add(vec_mule(q4x00, q8y0), vec_mulo(q4x00, q8y0));
+            vector signed short qv1 = vec_add(vec_mule(q4x01, q8y1), vec_mulo(q4x01, q8y1));
+            vector signed short qv2 = vec_add(vec_mule(q4x10, q8y2), vec_mulo(q4x10, q8y2));
+            vector signed short qv3 = vec_add(vec_mule(q4x11, q8y3), vec_mulo(q4x11, q8y3));
+
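+            // decode two 6-bit scales: low nibbles come from scales_l, the two
+            // missing high bits from the rolling scales_h word; bias is -32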
+            const uint16_t ls0 = (uint16_t)(((sc[0] & 0xf) | ((h << 4) & 0x30)) - 32);
+            const uint16_t ls1 = (uint16_t)(((sc[0] >> 4) | ((h << 2) & 0x30)) - 32);
+            h >>= 4;
+            sc++;
+
+            vector signed short vscales01 = vec_splats((int16_t)ls0);
+            vector signed short vscales23 = vec_splats((int16_t)ls1);
+
+            vsumi0 = vec_msum(qv0, vscales01, vsumi0);
+            vsumi1 = vec_msum(qv1, vscales01, vsumi1);
+            vsumi2 = vec_msum(qv2, vscales23, vsumi2);
+            vsumi3 = vec_msum(qv3, vscales23, vsumi3);
+        }
+
+        vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
+        vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
+        vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
+        vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
+    }
+
+    vsumf0 = vec_add(vsumf0, vsumf2);
+    vsumf1 = vec_add(vsumf1, vsumf3);
+
+    vsumf0 = vec_add(vsumf0, vsumf1);
+
+    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
+    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
+
+    *s = vec_extract(vsumf0, 0);
+
+#else
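+    // scalar reference path: two 32-weight sub-blocks per iteration, matching
+    // the nibble layout used by the vector code above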
+    float sumf = 0;
+    for (int ibl = 0; ibl < nb; ++ibl) {
+        const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d;
+        uint16_t h = x[ibl].scales_h;
+        const uint8_t * qs = x[ibl].qs;
+        const int8_t  * q8 = y[ibl].qs;
+        for (int ib = 0; ib < QK_K/32; ib += 2) {
+            const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30);
+            const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30);
+            h >>= 4;
+            const float d1 = d4d8*(ls1 - 32);
+            const float d2 = d4d8*(ls2 - 32);
+            int sumi1 = 0, sumi2 = 0;
+            for (int j = 0; j < 16; ++j) {
+                sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
+                sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
+            }
+            sumf += d1 * (sumi1 + sumi2);
+            qs += 16;
+            q8 += 32;
+            sumi1 = sumi2 = 0;
+            for (int j = 0; j < 16; ++j) {
+                sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
+                sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
+            }
+            sumf += d2 * (sumi1 + sumi2);
+            qs += 16;
+            q8 += 32;
+        }
+    }
+    *s = sumf;
+#endif
+}
+