whispercpp 1.3.4 → 1.3.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (630):
  1. checksums.yaml +4 -4
  2. data/README.md +60 -43
  3. data/ext/extconf.rb +2 -2
  4. data/ext/ruby_whisper.c +14 -2
  5. data/ext/ruby_whisper.h +39 -0
  6. data/ext/ruby_whisper_context.c +22 -22
  7. data/ext/ruby_whisper_model.c +12 -12
  8. data/ext/ruby_whisper_params.c +47 -23
  9. data/ext/ruby_whisper_segment.c +84 -19
  10. data/ext/ruby_whisper_token.c +351 -0
  11. data/ext/ruby_whisper_transcribe.cpp +1 -1
  12. data/ext/ruby_whisper_vad_context.c +75 -0
  13. data/ext/ruby_whisper_vad_context_detect.cpp +50 -0
  14. data/ext/ruby_whisper_vad_segment.c +139 -0
  15. data/ext/ruby_whisper_vad_segments.c +106 -0
  16. data/ext/sources/CMakeLists.txt +4 -1
  17. data/ext/sources/bindings/javascript/package.json +1 -1
  18. data/ext/sources/cmake/arm64-apple-clang.cmake +16 -0
  19. data/ext/sources/cmake/arm64-windows-llvm.cmake +16 -0
  20. data/ext/sources/cmake/riscv64-spacemit-linux-gnu-gcc.cmake +29 -0
  21. data/ext/sources/cmake/x64-windows-llvm.cmake +5 -0
  22. data/ext/sources/examples/addon.node/vad-example.js +2 -2
  23. data/ext/sources/examples/cli/cli.cpp +121 -112
  24. data/ext/sources/examples/lsp/CMakeLists.txt +2 -1
  25. data/ext/sources/examples/quantize/CMakeLists.txt +2 -1
  26. data/ext/sources/examples/server/server.cpp +10 -11
  27. data/ext/sources/examples/talk-llama/CMakeLists.txt +5 -1
  28. data/ext/sources/examples/talk-llama/llama-adapter.cpp +12 -3
  29. data/ext/sources/examples/talk-llama/llama-adapter.h +7 -1
  30. data/ext/sources/examples/talk-llama/llama-arch.cpp +2046 -1974
  31. data/ext/sources/examples/talk-llama/llama-arch.h +67 -2
  32. data/ext/sources/examples/talk-llama/llama-batch.cpp +75 -33
  33. data/ext/sources/examples/talk-llama/llama-batch.h +17 -4
  34. data/ext/sources/examples/talk-llama/llama-chat.cpp +79 -3
  35. data/ext/sources/examples/talk-llama/llama-chat.h +4 -0
  36. data/ext/sources/examples/talk-llama/llama-context.cpp +775 -78
  37. data/ext/sources/examples/talk-llama/llama-context.h +57 -9
  38. data/ext/sources/examples/talk-llama/llama-cparams.h +1 -0
  39. data/ext/sources/examples/talk-llama/llama-grammar.cpp +288 -53
  40. data/ext/sources/examples/talk-llama/llama-grammar.h +22 -1
  41. data/ext/sources/examples/talk-llama/llama-graph.cpp +381 -64
  42. data/ext/sources/examples/talk-llama/llama-graph.h +103 -13
  43. data/ext/sources/examples/talk-llama/llama-hparams.cpp +26 -2
  44. data/ext/sources/examples/talk-llama/llama-hparams.h +41 -10
  45. data/ext/sources/examples/talk-llama/llama-impl.cpp +7 -3
  46. data/ext/sources/examples/talk-llama/llama-impl.h +1 -1
  47. data/ext/sources/examples/talk-llama/llama-kv-cache-iswa.cpp +5 -3
  48. data/ext/sources/examples/talk-llama/llama-kv-cache.cpp +145 -65
  49. data/ext/sources/examples/talk-llama/llama-kv-cache.h +22 -7
  50. data/ext/sources/examples/talk-llama/llama-kv-cells.h +44 -2
  51. data/ext/sources/examples/talk-llama/llama-memory-hybrid.cpp +12 -10
  52. data/ext/sources/examples/talk-llama/llama-memory-recurrent.cpp +32 -19
  53. data/ext/sources/examples/talk-llama/llama-memory-recurrent.h +2 -2
  54. data/ext/sources/examples/talk-llama/llama-mmap.cpp +172 -37
  55. data/ext/sources/examples/talk-llama/llama-mmap.h +8 -3
  56. data/ext/sources/examples/talk-llama/llama-model-loader.cpp +91 -9
  57. data/ext/sources/examples/talk-llama/llama-model-loader.h +6 -0
  58. data/ext/sources/examples/talk-llama/llama-model-saver.cpp +3 -0
  59. data/ext/sources/examples/talk-llama/llama-model.cpp +1529 -13134
  60. data/ext/sources/examples/talk-llama/llama-model.h +44 -3
  61. data/ext/sources/examples/talk-llama/llama-quant.cpp +8 -23
  62. data/ext/sources/examples/talk-llama/llama-sampling.cpp +1294 -198
  63. data/ext/sources/examples/talk-llama/llama-sampling.h +19 -7
  64. data/ext/sources/examples/talk-llama/llama-vocab.cpp +133 -37
  65. data/ext/sources/examples/talk-llama/llama-vocab.h +45 -40
  66. data/ext/sources/examples/talk-llama/llama.cpp +729 -2
  67. data/ext/sources/examples/talk-llama/llama.h +152 -14
  68. data/ext/sources/examples/talk-llama/models/afmoe.cpp +191 -0
  69. data/ext/sources/examples/talk-llama/models/apertus.cpp +125 -0
  70. data/ext/sources/examples/talk-llama/models/arcee.cpp +135 -0
  71. data/ext/sources/examples/talk-llama/models/arctic.cpp +138 -0
  72. data/ext/sources/examples/talk-llama/models/arwkv7.cpp +86 -0
  73. data/ext/sources/examples/talk-llama/models/baichuan.cpp +122 -0
  74. data/ext/sources/examples/talk-llama/models/bailingmoe.cpp +144 -0
  75. data/ext/sources/examples/talk-llama/models/bailingmoe2.cpp +135 -0
  76. data/ext/sources/examples/talk-llama/models/bert.cpp +178 -0
  77. data/ext/sources/examples/talk-llama/models/bitnet.cpp +160 -0
  78. data/ext/sources/examples/talk-llama/models/bloom.cpp +101 -0
  79. data/ext/sources/examples/talk-llama/models/chameleon.cpp +178 -0
  80. data/ext/sources/examples/talk-llama/models/chatglm.cpp +132 -0
  81. data/ext/sources/examples/talk-llama/models/codeshell.cpp +111 -0
  82. data/ext/sources/examples/talk-llama/models/cogvlm.cpp +102 -0
  83. data/ext/sources/examples/talk-llama/models/cohere2-iswa.cpp +134 -0
  84. data/ext/sources/examples/talk-llama/models/command-r.cpp +122 -0
  85. data/ext/sources/examples/talk-llama/models/dbrx.cpp +123 -0
  86. data/ext/sources/examples/talk-llama/models/deci.cpp +135 -0
  87. data/ext/sources/examples/talk-llama/models/deepseek.cpp +144 -0
  88. data/ext/sources/examples/talk-llama/models/deepseek2.cpp +259 -0
  89. data/ext/sources/examples/talk-llama/models/dots1.cpp +134 -0
  90. data/ext/sources/examples/talk-llama/models/dream.cpp +105 -0
  91. data/ext/sources/examples/talk-llama/models/ernie4-5-moe.cpp +150 -0
  92. data/ext/sources/examples/talk-llama/models/ernie4-5.cpp +110 -0
  93. data/ext/sources/examples/talk-llama/models/exaone.cpp +114 -0
  94. data/ext/sources/examples/talk-llama/models/exaone4.cpp +123 -0
  95. data/ext/sources/examples/talk-llama/models/falcon-h1.cpp +113 -0
  96. data/ext/sources/examples/talk-llama/models/falcon.cpp +120 -0
  97. data/ext/sources/examples/talk-llama/models/gemma-embedding.cpp +116 -0
  98. data/ext/sources/examples/talk-llama/models/gemma.cpp +112 -0
  99. data/ext/sources/examples/talk-llama/models/gemma2-iswa.cpp +128 -0
  100. data/ext/sources/examples/talk-llama/models/gemma3.cpp +155 -0
  101. data/ext/sources/examples/talk-llama/models/gemma3n-iswa.cpp +384 -0
  102. data/ext/sources/examples/talk-llama/models/glm4-moe.cpp +170 -0
  103. data/ext/sources/examples/talk-llama/models/glm4.cpp +150 -0
  104. data/ext/sources/examples/talk-llama/models/gpt2.cpp +105 -0
  105. data/ext/sources/examples/talk-llama/models/gptneox.cpp +144 -0
  106. data/ext/sources/examples/talk-llama/models/granite-hybrid.cpp +196 -0
  107. data/ext/sources/examples/talk-llama/models/granite.cpp +211 -0
  108. data/ext/sources/examples/talk-llama/models/graph-context-mamba.cpp +283 -0
  109. data/ext/sources/examples/talk-llama/models/grok.cpp +159 -0
  110. data/ext/sources/examples/talk-llama/models/grovemoe.cpp +141 -0
  111. data/ext/sources/examples/talk-llama/models/hunyuan-dense.cpp +132 -0
  112. data/ext/sources/examples/talk-llama/models/hunyuan-moe.cpp +154 -0
  113. data/ext/sources/examples/talk-llama/models/internlm2.cpp +120 -0
  114. data/ext/sources/examples/talk-llama/models/jais.cpp +86 -0
  115. data/ext/sources/examples/talk-llama/models/jamba.cpp +106 -0
  116. data/ext/sources/examples/talk-llama/models/lfm2.cpp +175 -0
  117. data/ext/sources/examples/talk-llama/models/llada-moe.cpp +122 -0
  118. data/ext/sources/examples/talk-llama/models/llada.cpp +99 -0
  119. data/ext/sources/examples/talk-llama/models/llama-iswa.cpp +178 -0
  120. data/ext/sources/examples/talk-llama/models/llama.cpp +168 -0
  121. data/ext/sources/examples/talk-llama/models/maincoder.cpp +117 -0
  122. data/ext/sources/examples/talk-llama/models/mamba.cpp +55 -0
  123. data/ext/sources/examples/talk-llama/models/mimo2-iswa.cpp +123 -0
  124. data/ext/sources/examples/talk-llama/models/minicpm3.cpp +199 -0
  125. data/ext/sources/examples/talk-llama/models/minimax-m2.cpp +124 -0
  126. data/ext/sources/examples/talk-llama/models/mistral3.cpp +160 -0
  127. data/ext/sources/examples/talk-llama/models/models.h +569 -0
  128. data/ext/sources/examples/talk-llama/models/modern-bert.cpp +116 -0
  129. data/ext/sources/examples/talk-llama/models/mpt.cpp +126 -0
  130. data/ext/sources/examples/talk-llama/models/nemotron-h.cpp +150 -0
  131. data/ext/sources/examples/talk-llama/models/nemotron.cpp +122 -0
  132. data/ext/sources/examples/talk-llama/models/neo-bert.cpp +104 -0
  133. data/ext/sources/examples/talk-llama/models/olmo.cpp +121 -0
  134. data/ext/sources/examples/talk-llama/models/olmo2.cpp +150 -0
  135. data/ext/sources/examples/talk-llama/models/olmoe.cpp +124 -0
  136. data/ext/sources/examples/talk-llama/models/openai-moe-iswa.cpp +127 -0
  137. data/ext/sources/examples/talk-llama/models/openelm.cpp +124 -0
  138. data/ext/sources/examples/talk-llama/models/orion.cpp +123 -0
  139. data/ext/sources/examples/talk-llama/models/pangu-embedded.cpp +121 -0
  140. data/ext/sources/examples/talk-llama/models/phi2.cpp +121 -0
  141. data/ext/sources/examples/talk-llama/models/phi3.cpp +152 -0
  142. data/ext/sources/examples/talk-llama/models/plamo.cpp +110 -0
  143. data/ext/sources/examples/talk-llama/models/plamo2.cpp +316 -0
  144. data/ext/sources/examples/talk-llama/models/plamo3.cpp +128 -0
  145. data/ext/sources/examples/talk-llama/models/plm.cpp +168 -0
  146. data/ext/sources/examples/talk-llama/models/qwen.cpp +108 -0
  147. data/ext/sources/examples/talk-llama/models/qwen2.cpp +126 -0
  148. data/ext/sources/examples/talk-llama/models/qwen2moe.cpp +151 -0
  149. data/ext/sources/examples/talk-llama/models/qwen2vl.cpp +117 -0
  150. data/ext/sources/examples/talk-llama/models/qwen3.cpp +117 -0
  151. data/ext/sources/examples/talk-llama/models/qwen3moe.cpp +124 -0
  152. data/ext/sources/examples/talk-llama/models/qwen3next.cpp +873 -0
  153. data/ext/sources/examples/talk-llama/models/qwen3vl-moe.cpp +149 -0
  154. data/ext/sources/examples/talk-llama/models/qwen3vl.cpp +141 -0
  155. data/ext/sources/examples/talk-llama/models/refact.cpp +94 -0
  156. data/ext/sources/examples/talk-llama/models/rnd1.cpp +126 -0
  157. data/ext/sources/examples/talk-llama/models/rwkv6-base.cpp +162 -0
  158. data/ext/sources/examples/talk-llama/models/rwkv6.cpp +94 -0
  159. data/ext/sources/examples/talk-llama/models/rwkv6qwen2.cpp +86 -0
  160. data/ext/sources/examples/talk-llama/models/rwkv7-base.cpp +135 -0
  161. data/ext/sources/examples/talk-llama/models/rwkv7.cpp +90 -0
  162. data/ext/sources/examples/talk-llama/models/seed-oss.cpp +124 -0
  163. data/ext/sources/examples/talk-llama/models/smallthinker.cpp +126 -0
  164. data/ext/sources/examples/talk-llama/models/smollm3.cpp +128 -0
  165. data/ext/sources/examples/talk-llama/models/stablelm.cpp +146 -0
  166. data/ext/sources/examples/talk-llama/models/starcoder.cpp +100 -0
  167. data/ext/sources/examples/talk-llama/models/starcoder2.cpp +121 -0
  168. data/ext/sources/examples/talk-llama/models/t5-dec.cpp +166 -0
  169. data/ext/sources/examples/talk-llama/models/t5-enc.cpp +96 -0
  170. data/ext/sources/examples/talk-llama/models/wavtokenizer-dec.cpp +149 -0
  171. data/ext/sources/examples/talk-llama/models/xverse.cpp +108 -0
  172. data/ext/sources/examples/talk-llama/unicode.cpp +102 -16
  173. data/ext/sources/examples/vad-speech-segments/CMakeLists.txt +1 -1
  174. data/ext/sources/examples/whisper.wasm/index-tmpl.html +1 -1
  175. data/ext/sources/ggml/CMakeLists.txt +82 -54
  176. data/ext/sources/ggml/include/ggml-alloc.h +9 -0
  177. data/ext/sources/ggml/include/ggml-backend.h +4 -1
  178. data/ext/sources/ggml/include/ggml-cpu.h +1 -0
  179. data/ext/sources/ggml/include/ggml-hexagon.h +19 -0
  180. data/ext/sources/ggml/include/ggml-rpc.h +8 -11
  181. data/ext/sources/ggml/include/ggml-zendnn.h +22 -0
  182. data/ext/sources/ggml/include/ggml.h +190 -12
  183. data/ext/sources/ggml/src/CMakeLists.txt +82 -11
  184. data/ext/sources/ggml/src/ggml-alloc.c +124 -41
  185. data/ext/sources/ggml/src/ggml-backend-impl.h +1 -4
  186. data/ext/sources/ggml/src/ggml-backend-reg.cpp +27 -3
  187. data/ext/sources/ggml/src/ggml-backend.cpp +71 -21
  188. data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +17 -3
  189. data/ext/sources/ggml/src/ggml-blas/ggml-blas.cpp +5 -9
  190. data/ext/sources/ggml/src/ggml-cann/acl_tensor.cpp +57 -45
  191. data/ext/sources/ggml/src/ggml-cann/acl_tensor.h +138 -47
  192. data/ext/sources/ggml/src/ggml-cann/aclnn_ops.cpp +2179 -1696
  193. data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +238 -317
  194. data/ext/sources/ggml/src/ggml-cann/common.h +283 -208
  195. data/ext/sources/ggml/src/ggml-cann/ggml-cann.cpp +626 -776
  196. data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +156 -86
  197. data/ext/sources/ggml/src/ggml-cpu/amx/amx.cpp +1 -0
  198. data/ext/sources/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp +4 -0
  199. data/ext/sources/ggml/src/ggml-cpu/arch/arm/quants.c +428 -26
  200. data/ext/sources/ggml/src/ggml-cpu/arch/arm/repack.cpp +1004 -0
  201. data/ext/sources/ggml/src/ggml-cpu/arch/loongarch/quants.c +4 -5
  202. data/ext/sources/ggml/src/ggml-cpu/arch/riscv/cpu-feats.cpp +38 -0
  203. data/ext/sources/ggml/src/ggml-cpu/arch/riscv/quants.c +108 -49
  204. data/ext/sources/ggml/src/ggml-cpu/arch/s390/cpu-feats.cpp +50 -0
  205. data/ext/sources/ggml/src/ggml-cpu/arch/x86/repack.cpp +6 -6
  206. data/ext/sources/ggml/src/ggml-cpu/arch-fallback.h +50 -2
  207. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-impl.h +5 -3
  208. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +195 -71
  209. data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.cpp +4 -0
  210. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +573 -106
  211. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.h +33 -44
  212. data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +298 -112
  213. data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm-ppc.h +333 -0
  214. data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +819 -125
  215. data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.h +6 -0
  216. data/ext/sources/ggml/src/ggml-cpu/ops.cpp +708 -431
  217. data/ext/sources/ggml/src/ggml-cpu/ops.h +5 -4
  218. data/ext/sources/ggml/src/ggml-cpu/repack.cpp +671 -31
  219. data/ext/sources/ggml/src/ggml-cpu/repack.h +14 -0
  220. data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +41 -43
  221. data/ext/sources/ggml/src/ggml-cpu/spacemit/ime.cpp +3 -2
  222. data/ext/sources/ggml/src/ggml-cpu/unary-ops.cpp +151 -0
  223. data/ext/sources/ggml/src/ggml-cpu/unary-ops.h +7 -0
  224. data/ext/sources/ggml/src/ggml-cpu/vec.cpp +124 -1
  225. data/ext/sources/ggml/src/ggml-cpu/vec.h +261 -146
  226. data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +72 -1
  227. data/ext/sources/ggml/src/ggml-cuda/argmax.cu +2 -2
  228. data/ext/sources/ggml/src/ggml-cuda/argsort.cu +123 -6
  229. data/ext/sources/ggml/src/ggml-cuda/argsort.cuh +16 -0
  230. data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +1 -1
  231. data/ext/sources/ggml/src/ggml-cuda/common.cuh +353 -80
  232. data/ext/sources/ggml/src/ggml-cuda/convert.cuh +10 -0
  233. data/ext/sources/ggml/src/ggml-cuda/cpy-utils.cuh +1 -1
  234. data/ext/sources/ggml/src/ggml-cuda/cpy.cu +339 -246
  235. data/ext/sources/ggml/src/ggml-cuda/cpy.cuh +1 -5
  236. data/ext/sources/ggml/src/ggml-cuda/cumsum.cu +307 -0
  237. data/ext/sources/ggml/src/ggml-cuda/cumsum.cuh +5 -0
  238. data/ext/sources/ggml/src/ggml-cuda/diag.cu +77 -0
  239. data/ext/sources/ggml/src/ggml-cuda/diag.cuh +5 -0
  240. data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +31 -21
  241. data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +663 -596
  242. data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cu +35 -741
  243. data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cuh +1241 -0
  244. data/ext/sources/ggml/src/ggml-cuda/fattn-vec.cuh +30 -37
  245. data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +14 -13
  246. data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +48 -0
  247. data/ext/sources/ggml/src/ggml-cuda/fattn.cu +83 -37
  248. data/ext/sources/ggml/src/ggml-cuda/fill.cu +37 -0
  249. data/ext/sources/ggml/src/ggml-cuda/fill.cuh +3 -0
  250. data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +1155 -164
  251. data/ext/sources/ggml/src/ggml-cuda/mean.cu +5 -4
  252. data/ext/sources/ggml/src/ggml-cuda/mma.cuh +741 -48
  253. data/ext/sources/ggml/src/ggml-cuda/mmf.cu +60 -12
  254. data/ext/sources/ggml/src/ggml-cuda/mmf.cuh +381 -42
  255. data/ext/sources/ggml/src/ggml-cuda/mmid.cu +164 -0
  256. data/ext/sources/ggml/src/ggml-cuda/mmid.cuh +5 -0
  257. data/ext/sources/ggml/src/ggml-cuda/mmq.cu +69 -176
  258. data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +498 -171
  259. data/ext/sources/ggml/src/ggml-cuda/mmvf.cu +375 -79
  260. data/ext/sources/ggml/src/ggml-cuda/mmvf.cuh +3 -2
  261. data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +241 -95
  262. data/ext/sources/ggml/src/ggml-cuda/mmvq.cuh +1 -1
  263. data/ext/sources/ggml/src/ggml-cuda/pad.cu +64 -33
  264. data/ext/sources/ggml/src/ggml-cuda/quantize.cu +151 -0
  265. data/ext/sources/ggml/src/ggml-cuda/quantize.cuh +14 -0
  266. data/ext/sources/ggml/src/ggml-cuda/rope.cu +192 -77
  267. data/ext/sources/ggml/src/ggml-cuda/rope.cuh +2 -0
  268. data/ext/sources/ggml/src/ggml-cuda/set-rows.cu +101 -47
  269. data/ext/sources/ggml/src/ggml-cuda/set.cu +39 -0
  270. data/ext/sources/ggml/src/ggml-cuda/set.cuh +7 -0
  271. data/ext/sources/ggml/src/ggml-cuda/softmax.cu +203 -6
  272. data/ext/sources/ggml/src/ggml-cuda/solve_tri.cu +275 -0
  273. data/ext/sources/ggml/src/ggml-cuda/solve_tri.cuh +3 -0
  274. data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +14 -20
  275. data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cu +49 -84
  276. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq112-dv112.cu +5 -0
  277. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq128-dv128.cu +5 -0
  278. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq256-dv256.cu +5 -0
  279. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq40-dv40.cu +5 -0
  280. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq576-dv512.cu +5 -0
  281. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq64-dv64.cu +5 -0
  282. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq72-dv72.cu +5 -0
  283. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq80-dv80.cu +5 -0
  284. data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq96-dv96.cu +5 -0
  285. data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +19 -1
  286. data/ext/sources/ggml/src/ggml-cuda/top-k.cu +96 -0
  287. data/ext/sources/ggml/src/ggml-cuda/top-k.cuh +3 -0
  288. data/ext/sources/ggml/src/ggml-cuda/topk-moe.cu +168 -76
  289. data/ext/sources/ggml/src/ggml-cuda/topk-moe.cuh +11 -4
  290. data/ext/sources/ggml/src/ggml-cuda/tri.cu +136 -0
  291. data/ext/sources/ggml/src/ggml-cuda/tri.cuh +5 -0
  292. data/ext/sources/ggml/src/ggml-cuda/unary.cu +105 -11
  293. data/ext/sources/ggml/src/ggml-cuda/unary.cuh +36 -0
  294. data/ext/sources/ggml/src/ggml-cuda/upscale.cu +163 -7
  295. data/ext/sources/ggml/src/ggml-cuda/vendors/cuda.h +4 -0
  296. data/ext/sources/ggml/src/ggml-cuda/vendors/hip.h +12 -1
  297. data/ext/sources/ggml/src/ggml-cuda/vendors/musa.h +6 -0
  298. data/ext/sources/ggml/src/ggml-hexagon/CMakeLists.txt +80 -0
  299. data/ext/sources/ggml/src/ggml-hexagon/ggml-hexagon.cpp +3151 -0
  300. data/ext/sources/ggml/src/ggml-hexagon/htp/CMakeLists.txt +44 -0
  301. data/ext/sources/ggml/src/ggml-hexagon/htp/act-ops.c +682 -0
  302. data/ext/sources/ggml/src/ggml-hexagon/htp/binary-ops.c +360 -0
  303. data/ext/sources/ggml/src/ggml-hexagon/htp/cmake-toolchain.cmake +157 -0
  304. data/ext/sources/ggml/src/ggml-hexagon/htp/flash-attn-ops.c +566 -0
  305. data/ext/sources/ggml/src/ggml-hexagon/htp/get-rows-ops.c +112 -0
  306. data/ext/sources/ggml/src/ggml-hexagon/htp/htp-ctx.h +35 -0
  307. data/ext/sources/ggml/src/ggml-hexagon/htp/htp-dma.c +63 -0
  308. data/ext/sources/ggml/src/ggml-hexagon/htp/htp-dma.h +157 -0
  309. data/ext/sources/ggml/src/ggml-hexagon/htp/htp-msg.h +165 -0
  310. data/ext/sources/ggml/src/ggml-hexagon/htp/htp-ops.h +92 -0
  311. data/ext/sources/ggml/src/ggml-hexagon/htp/htp_iface.idl +16 -0
  312. data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-exp.c +94 -0
  313. data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-inverse.c +72 -0
  314. data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sigmoid.c +49 -0
  315. data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-utils.c +1020 -0
  316. data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-utils.h +1353 -0
  317. data/ext/sources/ggml/src/ggml-hexagon/htp/main.c +1001 -0
  318. data/ext/sources/ggml/src/ggml-hexagon/htp/matmul-ops.c +2503 -0
  319. data/ext/sources/ggml/src/ggml-hexagon/htp/ops-utils.h +149 -0
  320. data/ext/sources/ggml/src/ggml-hexagon/htp/rope-ops.c +487 -0
  321. data/ext/sources/ggml/src/ggml-hexagon/htp/set-rows-ops.c +168 -0
  322. data/ext/sources/ggml/src/ggml-hexagon/htp/softmax-ops.c +402 -0
  323. data/ext/sources/ggml/src/ggml-hexagon/htp/unary-ops.c +287 -0
  324. data/ext/sources/ggml/src/ggml-hexagon/htp/worker-pool.c +297 -0
  325. data/ext/sources/ggml/src/ggml-hexagon/htp/worker-pool.h +57 -0
  326. data/ext/sources/ggml/src/ggml-hexagon/htp-utils.c +454 -0
  327. data/ext/sources/ggml/src/ggml-hexagon/htp-utils.h +221 -0
  328. data/ext/sources/ggml/src/ggml-hexagon/op-desc.h +153 -0
  329. data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +8 -13
  330. data/ext/sources/ggml/src/ggml-impl.h +67 -6
  331. data/ext/sources/ggml/src/ggml-metal/ggml-metal-common.cpp +2 -2
  332. data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.m +29 -20
  333. data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.cpp +652 -285
  334. data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.h +103 -56
  335. data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.m +496 -118
  336. data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +231 -9
  337. data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.cpp +1227 -224
  338. data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.h +12 -0
  339. data/ext/sources/ggml/src/ggml-metal/ggml-metal.cpp +14 -8
  340. data/ext/sources/ggml/src/ggml-metal/ggml-metal.metal +1972 -704
  341. data/ext/sources/ggml/src/ggml-musa/CMakeLists.txt +3 -1
  342. data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +11 -0
  343. data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +1430 -120
  344. data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +63 -0
  345. data/ext/sources/ggml/src/ggml-opencl/kernels/expm1.cl +82 -0
  346. data/ext/sources/ggml/src/ggml-opencl/kernels/fill.cl +17 -0
  347. data/ext/sources/ggml/src/ggml-opencl/kernels/flash_attn_f32.cl +4 -3
  348. data/ext/sources/ggml/src/ggml-opencl/kernels/gemm_moe_mxfp4_f32.cl +162 -0
  349. data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_moe_mxfp4_f32.cl +156 -0
  350. data/ext/sources/ggml/src/ggml-opencl/kernels/get_rows.cl +36 -12
  351. data/ext/sources/ggml/src/ggml-opencl/kernels/mean.cl +39 -0
  352. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_f16_f32_kq_kqv.cl +273 -0
  353. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_f16_f32_l4_lm.cl +24 -10
  354. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_f32_f32_l4_lm.cl +24 -10
  355. data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q8_0_f32_l4_lm.cl +154 -0
  356. data/ext/sources/ggml/src/ggml-opencl/kernels/pad.cl +29 -20
  357. data/ext/sources/ggml/src/ggml-opencl/kernels/rms_norm.cl +25 -10
  358. data/ext/sources/ggml/src/ggml-opencl/kernels/rope.cl +50 -24
  359. data/ext/sources/ggml/src/ggml-opencl/kernels/set_rows.cl +35 -16
  360. data/ext/sources/ggml/src/ggml-opencl/kernels/softplus.cl +88 -0
  361. data/ext/sources/ggml/src/ggml-opencl/kernels/sqr.cl +53 -0
  362. data/ext/sources/ggml/src/ggml-opencl/kernels/sqrt.cl +53 -0
  363. data/ext/sources/ggml/src/ggml-opencl/kernels/ssm_conv.cl +77 -0
  364. data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +13 -0
  365. data/ext/sources/ggml/src/ggml-rpc/ggml-rpc.cpp +438 -156
  366. data/ext/sources/ggml/src/ggml-sycl/CMakeLists.txt +48 -3
  367. data/ext/sources/ggml/src/ggml-sycl/add-id.cpp +77 -0
  368. data/ext/sources/ggml/src/ggml-sycl/add-id.hpp +8 -0
  369. data/ext/sources/ggml/src/ggml-sycl/backend.hpp +6 -0
  370. data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +0 -9
  371. data/ext/sources/ggml/src/ggml-sycl/binbcast.hpp +0 -6
  372. data/ext/sources/ggml/src/ggml-sycl/common.hpp +117 -15
  373. data/ext/sources/ggml/src/ggml-sycl/concat.cpp +55 -44
  374. data/ext/sources/ggml/src/ggml-sycl/convert.cpp +34 -0
  375. data/ext/sources/ggml/src/ggml-sycl/count-equal.cpp +79 -0
  376. data/ext/sources/ggml/src/ggml-sycl/count-equal.hpp +9 -0
  377. data/ext/sources/ggml/src/ggml-sycl/cpy.cpp +0 -3
  378. data/ext/sources/ggml/src/ggml-sycl/dequantize.hpp +18 -0
  379. data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +76 -3
  380. data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +333 -300
  381. data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +10 -2
  382. data/ext/sources/ggml/src/ggml-sycl/ggml-sycl.cpp +335 -110
  383. data/ext/sources/ggml/src/ggml-sycl/mmvq.cpp +22 -0
  384. data/ext/sources/ggml/src/ggml-sycl/norm.cpp +156 -0
  385. data/ext/sources/ggml/src/ggml-sycl/norm.hpp +2 -0
  386. data/ext/sources/ggml/src/ggml-sycl/pad.cpp +97 -0
  387. data/ext/sources/ggml/src/ggml-sycl/pad.hpp +24 -0
  388. data/ext/sources/ggml/src/ggml-sycl/pad_reflect_1d.cpp +100 -0
  389. data/ext/sources/ggml/src/ggml-sycl/pad_reflect_1d.hpp +10 -0
  390. data/ext/sources/ggml/src/ggml-sycl/presets.hpp +2 -0
  391. data/ext/sources/ggml/src/ggml-sycl/repeat_back.cpp +76 -0
  392. data/ext/sources/ggml/src/ggml-sycl/repeat_back.hpp +8 -0
  393. data/ext/sources/ggml/src/ggml-sycl/roll.cpp +122 -0
  394. data/ext/sources/ggml/src/ggml-sycl/roll.hpp +20 -0
  395. data/ext/sources/ggml/src/ggml-sycl/rope.cpp +30 -17
  396. data/ext/sources/ggml/src/ggml-sycl/set.cpp +73 -0
  397. data/ext/sources/ggml/src/ggml-sycl/set.hpp +5 -0
  398. data/ext/sources/ggml/src/ggml-sycl/softmax.cpp +327 -162
  399. data/ext/sources/ggml/src/ggml-sycl/softmax.hpp +4 -0
  400. data/ext/sources/ggml/src/ggml-sycl/ssm_conv.cpp +127 -0
  401. data/ext/sources/ggml/src/ggml-sycl/ssm_conv.hpp +5 -0
  402. data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +58 -0
  403. data/ext/sources/ggml/src/ggml-vulkan/CMakeLists.txt +38 -18
  404. data/ext/sources/ggml/src/ggml-vulkan/ggml-vulkan.cpp +5013 -2859
  405. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/abs.comp +21 -0
  406. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +2 -2
  407. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add.comp +2 -2
  408. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add1.comp +28 -0
  409. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add_id.comp +1 -1
  410. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/arange.comp +20 -0
  411. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argmax.comp +2 -2
  412. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp +33 -26
  413. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argsort_large.comp +114 -0
  414. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/ceil.comp +22 -0
  415. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp +2 -2
  416. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp +2 -2
  417. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp +2 -2
  418. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp +1 -1
  419. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp +47 -49
  420. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv_transpose_1d.comp +1 -1
  421. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp +2 -2
  422. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +3 -3
  423. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +4 -4
  424. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_transpose.comp +67 -0
  425. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp +2 -2
  426. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/count_equal.comp +2 -2
  427. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/count_experts.comp +51 -0
  428. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cumsum.comp +83 -0
  429. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cumsum_multipass1.comp +60 -0
  430. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cumsum_multipass2.comp +66 -0
  431. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_f32.comp +1 -1
  432. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{dequant_funcs.comp → dequant_funcs.glsl} +9 -21
  433. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{dequant_funcs_cm2.comp → dequant_funcs_cm2.glsl} +18 -4
  434. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{dequant_head.comp → dequant_head.glsl} +1 -1
  435. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_m.comp +1 -1
  436. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_s.comp +1 -1
  437. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp +1 -1
  438. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xs.comp +1 -1
  439. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp +1 -1
  440. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp +1 -1
  441. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp +1 -1
  442. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp +1 -1
  443. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_xs.comp +1 -1
  444. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_mxfp4.comp +3 -3
  445. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp +3 -3
  446. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp +1 -1
  447. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_0.comp +1 -1
  448. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_1.comp +1 -1
  449. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +3 -3
  450. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_0.comp +1 -1
  451. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_1.comp +1 -1
  452. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +3 -3
  453. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp +1 -1
  454. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q8_0.comp +1 -1
  455. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/diag.comp +29 -0
  456. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/diag_mask_inf.comp +1 -1
  457. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/div.comp +2 -2
  458. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/exp.comp +3 -3
  459. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/fill.comp +19 -0
  460. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +39 -17
  461. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{flash_attn_base.comp → flash_attn_base.glsl} +19 -1
  462. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +45 -7
  463. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +50 -12
  464. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +1 -1
  465. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/floor.comp +22 -0
  466. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu.comp +2 -2
  467. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu_erf.comp +2 -2
  468. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu_quick.comp +2 -2
  469. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu.comp +2 -2
  470. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu_erf.comp +2 -2
  471. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu_quick.comp +2 -2
  472. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{generic_binary_head.comp → generic_binary_head.glsl} +17 -2
  473. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{generic_head.comp → generic_head.glsl} +2 -0
  474. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{generic_unary_head.comp → generic_unary_head.glsl} +7 -0
  475. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp +4 -4
  476. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +3 -3
  477. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{glu_head.comp → glu_head.glsl} +1 -1
  478. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp +2 -2
  479. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/hardsigmoid.comp +2 -2
  480. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/hardswish.comp +2 -2
  481. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +19 -7
  482. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col_3d.comp +2 -3
  483. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +2 -2
  484. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/leaky_relu.comp +2 -2
  485. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/log.comp +18 -0
  486. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp +2 -2
  487. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +2 -2
  488. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{mul_mat_vec_base.comp → mul_mat_vec_base.glsl} +70 -25
  489. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iface.glsl +35 -0
  490. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_m.comp +71 -21
  491. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp +41 -25
  492. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_s.comp +2 -2
  493. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xs.comp +44 -26
  494. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xxs.comp +2 -2
  495. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_s.comp +2 -2
  496. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_xxs.comp +2 -2
  497. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +9 -7
  498. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp +9 -7
  499. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +4 -6
  500. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +2 -2
  501. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +4 -6
  502. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +4 -6
  503. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +2 -2
  504. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vecq.comp +39 -36
  505. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vecq_funcs.glsl +494 -0
  506. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +78 -103
  507. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +34 -23
  508. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{mul_mm_funcs.comp → mul_mm_funcs.glsl} +69 -59
  509. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_id_funcs.glsl +72 -0
  510. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +88 -228
  511. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.glsl +454 -0
  512. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_shmem_types.glsl +78 -0
  513. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/multi_add.comp +97 -13
  514. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/neg.comp +20 -0
  515. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/norm.comp +2 -2
  516. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_adamw.comp +2 -2
  517. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_sgd.comp +1 -1
  518. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +21 -6
  519. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pool2d.comp +1 -1
  520. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/quantize_q8_1.comp +10 -10
  521. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/reglu.comp +2 -2
  522. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/relu.comp +2 -2
  523. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp +2 -2
  524. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat_back.comp +2 -2
  525. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +50 -4
  526. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_back.comp +2 -2
  527. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_partials.comp +2 -2
  528. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/roll.comp +2 -2
  529. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_funcs.glsl +234 -0
  530. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.glsl +20 -0
  531. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +6 -50
  532. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +6 -33
  533. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +6 -33
  534. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_params.glsl +28 -0
  535. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_vision.comp +6 -39
  536. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/round.comp +29 -0
  537. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +2 -2
  538. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sigmoid.comp +2 -2
  539. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu.comp +2 -2
  540. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu_back.comp +2 -2
  541. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp +2 -2
  542. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +1 -1
  543. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_back.comp +2 -2
  544. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_large1.comp +62 -0
  545. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_large2.comp +79 -0
  546. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_large3.comp +65 -0
  547. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_large_common.glsl +53 -0
  548. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/softplus.comp +23 -0
  549. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/solve_tri.comp +81 -0
  550. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sqrt.comp +2 -2
  551. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/square.comp +2 -2
  552. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/ssm_conv.comp +44 -0
  553. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/ssm_scan.comp +124 -0
  554. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/step.comp +22 -0
  555. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sub.comp +2 -2
  556. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp +2 -25
  557. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.glsl +25 -0
  558. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/swiglu.comp +2 -2
  559. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/swiglu_oai.comp +2 -2
  560. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp +2 -2
  561. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp +1 -1
  562. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/topk_argsort.comp +118 -0
  563. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/topk_moe.comp +213 -0
  564. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/topk_nary_search.comp +246 -0
  565. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/tri.comp +43 -0
  566. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/trunc.comp +22 -0
  567. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{types.comp → types.glsl} +345 -26
  568. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +90 -12
  569. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +335 -151
  570. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/xielu.comp +35 -0
  571. data/ext/sources/ggml/src/ggml-webgpu/CMakeLists.txt +28 -2
  572. data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu-shader-lib.hpp +169 -0
  573. data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu.cpp +1964 -435
  574. data/ext/sources/ggml/src/ggml-webgpu/pre_wgsl.hpp +778 -0
  575. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/bin_op.tmpl.wgsl +188 -0
  576. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cpy.tmpl.wgsl +101 -0
  577. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/embed_wgsl.py +33 -10
  578. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/flash_attn.wgsl +591 -0
  579. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/get_rows.tmpl.wgsl +1 -1
  580. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/glu.tmpl.wgsl +323 -0
  581. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat.tmpl.wgsl +6 -6
  582. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_decls.tmpl +97 -0
  583. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_reg_tile.tmpl.wgsl +247 -0
  584. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_subgroup_matrix.tmpl.wgsl +302 -0
  585. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_vec.tmpl.wgsl +267 -0
  586. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm.wgsl +83 -17
  587. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/rope.tmpl.wgsl +295 -0
  588. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/scale.tmpl.wgsl +90 -0
  589. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/set_rows.tmpl.wgsl +112 -0
  590. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/soft_max.tmpl.wgsl +345 -0
  591. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/unary_op.wgsl +483 -0
  592. data/ext/sources/ggml/src/ggml-zendnn/CMakeLists.txt +92 -0
  593. data/ext/sources/ggml/src/ggml-zendnn/ggml-zendnn.cpp +466 -0
  594. data/ext/sources/ggml/src/ggml.c +425 -33
  595. data/ext/sources/include/whisper.h +1 -0
  596. data/ext/sources/src/CMakeLists.txt +3 -1
  597. data/ext/sources/src/whisper.cpp +101 -35
  598. data/ext/sources/tests/CMakeLists.txt +2 -2
  599. data/ext/sources/tests/test-vad-full.cpp +4 -2
  600. data/ext/sources/tests/test-vad.cpp +1 -1
  601. data/extsources.rb +1 -0
  602. data/lib/whisper/model/uri.rb +17 -18
  603. data/sig/whisper.rbs +119 -2
  604. data/test/test_params.rb +16 -8
  605. data/test/test_segment.rb +0 -1
  606. data/test/test_token.rb +70 -0
  607. data/test/test_vad.rb +1 -1
  608. data/test/test_vad_context.rb +50 -0
  609. data/test/test_vad_segment.rb +19 -0
  610. data/test/test_vad_segments.rb +16 -0
  611. data/test/test_whisper.rb +7 -0
  612. data/whispercpp.gemspec +1 -1
  613. metadata +287 -34
  614. data/ext/sources/build-xcframework.sh +0 -571
  615. data/ext/sources/ggml/src/ggml-cann/Doxyfile +0 -2579
  616. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.comp +0 -105
  617. data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp +0 -55
  618. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/add.tmpl.wgsl +0 -44
  619. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/add_in_place.tmpl.wgsl +0 -41
  620. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cpy.wgsl +0 -60
  621. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul.tmpl.wgsl +0 -44
  622. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_in_place.tmpl.wgsl +0 -41
  623. data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm_in_place.wgsl +0 -48
  624. /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{test_bfloat16_support.comp → feature-tests/bfloat16.comp} +0 -0
  625. /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{test_coopmat_support.comp → feature-tests/coopmat.comp} +0 -0
  626. /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{test_coopmat2_support.comp → feature-tests/coopmat2.comp} +0 -0
  627. /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{test_integer_dot_support.comp → feature-tests/integer_dot.comp} +0 -0
  628. /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{glu_main.comp → glu_main.glsl} +0 -0
  629. /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{rte.comp → rte.glsl} +0 -0
  630. /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{utils.comp → utils.glsl} +0 -0
@@ -0,0 +1,1020 @@
1
+ #pragma clang diagnostic ignored "-Wunused-variable"
2
+ #pragma clang diagnostic ignored "-Wunused-function"
3
+ #pragma clang diagnostic ignored "-Wunused-but-set-variable"
4
+
5
+ #ifdef HTP_DEBUG
6
+ # define FARF_HIGH 1
7
+ #endif
8
+
9
+ #include <HAP_farf.h>
10
+ #include <HAP_mem.h>
11
+ #include <HAP_perf.h>
12
+ #include <HAP_ps.h>
13
+ #include <hexagon_protos.h>
14
+ #include <hexagon_types.h>
15
+ #include <math.h>
16
+ #include <string.h>
17
+
18
+ #define GGML_COMMON_DECL_C
19
+ #include "ggml-common.h"
20
+ #include "hvx-utils.h"
21
+
22
+ #define htp_binary_ops_preamble \
23
+ int step_of_4 = num_elems >> 7; \
24
+ int step_of_2 = (num_elems - step_of_4 * VLEN_FP32 * 4) >> 6; \
25
+ int step_of_1 = (num_elems - step_of_4 * VLEN_FP32 * 4 - step_of_2 * VLEN_FP32 * 2) >> 5; \
26
+ int remaining = num_elems - step_of_4 * VLEN_FP32 * 4 - step_of_2 * VLEN_FP32 * 2 - step_of_1 * VLEN_FP32; \
27
+ \
28
+ const uint8_t * restrict src0_curr = src0; \
29
+ const uint8_t * restrict src1_curr = src1; \
30
+ uint8_t * restrict dst_curr = dst;
31
+
32
+ void hvx_mul_f32(const uint8_t * restrict src0,
33
+ const uint8_t * restrict src1,
34
+ uint8_t * restrict dst,
35
+ const int num_elems) {
36
+ int left_over = num_elems & (VLEN_FP32 - 1);
37
+ int num_elems_whole = num_elems - left_over;
38
+
39
+ int unaligned_addr = 0;
40
+ int unaligned_loop = 0;
41
+ if ((0 == htp_is_aligned((void *) src0, VLEN)) || (0 == htp_is_aligned((void *) src1, VLEN)) ||
42
+ (0 == htp_is_aligned((void *) dst, VLEN))) {
43
+ FARF(HIGH, "hvx_mul_f32: unaligned address in hvx op, possibly slower execution\n");
44
+ unaligned_addr = 1;
45
+ }
46
+
47
+ if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
48
+ unaligned_loop = 1;
49
+ FARF(HIGH, "hvx_mul_f32: unaligned loop in hvx op, possibly slower execution\n");
50
+ }
51
+
52
+
53
+ bool handled_leftover = false;
54
+ if (0 == unaligned_loop) {
55
+ HVX_Vector * restrict vec_in1 = (HVX_Vector *) src0;
56
+ HVX_Vector * restrict vec_in2 = (HVX_Vector *) src1;
57
+ HVX_Vector * restrict vec_out = (HVX_Vector *) dst;
58
+
59
+ #pragma unroll(4)
60
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
61
+ HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*vec_in1++, *vec_in2++);
62
+ *vec_out++ = Q6_Vsf_equals_Vqf32(v);
63
+ }
64
+ } else {
65
+ int step_of_1 = num_elems_whole >> 5; // divby 32, because 32 float = 128 bytes per HVX vector
66
+ int leftover_size = left_over * sizeof(float);
67
+
68
+
69
+ HVX_Vector * restrict vec_in1 = (HVX_Vector *) src0;
70
+ HVX_Vector * restrict vec_in2 = (HVX_Vector *) src1;
71
+ HVX_UVector * restrict vec_out = (HVX_UVector *) dst;
72
+
73
+ HVX_Vector slinep;
74
+ HVX_Vector slinec;
75
+ HVX_Vector sline;
76
+ HVX_Vector sline2p;
77
+ HVX_Vector sline2c;
78
+ HVX_Vector sline2;
79
+
80
+ slinep = *vec_in1++;
81
+ sline2p = *vec_in2++;
82
+ #pragma unroll(4)
83
+ for (int i = step_of_1 - 1; i > 0; i--) {
84
+ slinec = *vec_in1++;
85
+ sline2c = *vec_in2++;
86
+ sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src0);
87
+ sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) src1);
88
+
89
+ *((HVX_UVector *) (vec_out++)) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline, sline2));
90
+ slinep = slinec;
91
+ sline2p = sline2c;
92
+ }
93
+ if (step_of_1 > 1) {
94
+ slinec = htp_is_aligned(vec_in1, VLEN) && left_over == 0 ? slinep : *vec_in1++;
95
+ sline2c = htp_is_aligned(vec_in2, VLEN) && left_over == 0 ? sline2p : *vec_in2++;
96
+
97
+ sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src0);
98
+ sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) src1);
99
+ *((HVX_UVector *) (vec_out++)) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline, sline2));
100
+ slinep = slinec;
101
+ sline2p = sline2c;
102
+ }
103
+ if (left_over > 0) {
104
+ slinec = (is_in_one_chunk(vec_in1, leftover_size, VLEN) ? slinep : *vec_in1++);
105
+
106
+ sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src0);
107
+ sline2c = (is_in_one_chunk(vec_in2, leftover_size, VLEN) ? sline2p : *vec_in2++);
108
+ sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) src1);
109
+
110
+ HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(sline, sline2);
111
+ hvx_vec_store_u(vec_out, leftover_size, Q6_Vsf_equals_Vqf32(out));
112
+ handled_leftover = true;
113
+ }
114
+ }
115
+
116
+
117
+ if (left_over > 0 && !handled_leftover) {
118
+ const float * src0f = (const float *) src0 + num_elems_whole;
119
+ const float * src1f = (const float *) src1 + num_elems_whole;
120
+ float * dstf = (float *) dst + num_elems_whole;
121
+
122
+ HVX_Vector in1 = *(HVX_UVector *) src0f;
123
+ HVX_Vector in2 = *(HVX_UVector *) src1f;
124
+
125
+ HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(in1, in2);
126
+ hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out));
127
+ }
128
+ }
129
+
130
+ void hvx_mul_f32_opt(const uint8_t * restrict src0,
131
+ const uint8_t * restrict src1,
132
+ uint8_t * restrict dst,
133
+ const int num_elems) {
134
+ htp_binary_ops_preamble;
135
+
136
+ for (int i = 0; i < step_of_4; i++) {
137
+ HVX_Vector v1a = *(HVX_Vector *) src0_curr;
138
+
139
+ HVX_Vector v1b = *(HVX_Vector *) src1_curr;
140
+
141
+ HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN);
142
+
143
+ HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(v1a, v1b);
144
+
145
+ HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN);
146
+
147
+ HVX_Vector v3a = *(HVX_Vector *) (src0_curr + 2 * VLEN);
148
+
149
+ HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v2a, v2b);
150
+
151
+ *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1);
152
+
153
+ HVX_Vector v3b = *(HVX_Vector *) (src1_curr + 2 * VLEN);
154
+
155
+ HVX_Vector v4a = *(HVX_Vector *) (src0_curr + 3 * VLEN);
156
+
157
+ src0_curr += 4 * VLEN;
158
+
159
+ HVX_Vector v3 = Q6_Vqf32_vmpy_VsfVsf(v3a, v3b);
160
+
161
+ *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2);
162
+
163
+ HVX_Vector v4b = *(HVX_Vector *) (src1_curr + 3 * VLEN);
164
+
165
+ *(HVX_Vector *) (dst_curr + 2 * VLEN) = Q6_Vsf_equals_Vqf32(v3);
166
+
167
+ HVX_Vector v4 = Q6_Vqf32_vmpy_VsfVsf(v4a, v4b);
168
+
169
+ src1_curr += 4 * VLEN;
170
+
171
+ *(HVX_Vector *) (dst_curr + 3 * VLEN) = Q6_Vsf_equals_Vqf32(v4);
172
+
173
+ dst_curr += 4 * VLEN;
174
+ }
175
+
176
+ for (int i = 0; i < step_of_2; i++) {
177
+ HVX_Vector v1a = *(HVX_Vector *) src0_curr;
178
+
179
+ HVX_Vector v1b = *(HVX_Vector *) src1_curr;
180
+
181
+ HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN);
182
+
183
+ HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(v1a, v1b);
184
+
185
+ HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN);
186
+
187
+ *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1);
188
+
189
+ src0_curr += 2 * VLEN;
190
+
191
+ HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v2a, v2b);
192
+
193
+ src1_curr += 2 * VLEN;
194
+
195
+ *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2);
196
+
197
+ dst_curr += 2 * VLEN;
198
+ }
199
+
200
+ for (int i = 0; i < step_of_1; i++) {
201
+ HVX_Vector va = *(HVX_Vector *) src0_curr;
202
+
203
+ src0_curr += VLEN;
204
+
205
+ HVX_Vector vb = *(HVX_Vector *) src1_curr;
206
+
207
+ src1_curr += VLEN;
208
+
209
+ HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(va, vb);
210
+
211
+ *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v);
212
+
213
+ dst_curr += VLEN;
214
+ }
215
+
216
+ if (remaining > 0) {
217
+ HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr);
218
+ hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v));
219
+ }
220
+ }
221
+
222
+ void hvx_mul_mul_f32_opt(const uint8_t * restrict src0,
223
+ const uint8_t * restrict src1,
224
+ const uint8_t * restrict src2,
225
+ uint8_t * restrict dst,
226
+ const int num_elems) {
227
+ const uint8_t * restrict src0_curr = src0;
228
+ const uint8_t * restrict src1_curr = src1;
229
+ const uint8_t * restrict src2_curr = src2;
230
+ uint8_t * restrict dst_curr = dst;
231
+
232
+ int step_of_2 = num_elems >> 6;
233
+ int step_of_1 = (num_elems - step_of_2 * VLEN_FP32 * 2) >> 5;
234
+ int remaining = num_elems - step_of_2 * VLEN_FP32 * 2 - step_of_1 * VLEN_FP32;
235
+
236
+ for (int i = 0; i < step_of_2; i++) {
237
+ HVX_Vector v1a = *(HVX_Vector *) src0_curr;
238
+ HVX_Vector v1b = *(HVX_Vector *) src1_curr;
239
+ HVX_Vector v1c = *(HVX_Vector *) src2_curr;
240
+
241
+ HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN);
242
+
243
+ HVX_Vector v1_ = Q6_Vqf32_vmpy_VsfVsf(v1a, v1b);
244
+ HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v1_), v1c);
245
+
246
+ HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN);
247
+
248
+ *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1);
249
+
250
+ HVX_Vector v2c = *(HVX_Vector *) (src2_curr + VLEN);
251
+
252
+ src0_curr += 2 * VLEN;
253
+
254
+ HVX_Vector v2_ = Q6_Vqf32_vmpy_VsfVsf(v2a, v2b);
255
+ HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v2_), v2c);
256
+
257
+ src1_curr += 2 * VLEN;
258
+ src2_curr += 2 * VLEN;
259
+
260
+ *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2);
261
+
262
+ dst_curr += 2 * VLEN;
263
+ }
264
+ for (int i = 0; i < step_of_1; i++) {
265
+ HVX_Vector va = *(HVX_Vector *) src0_curr;
266
+ src0_curr += VLEN;
267
+
268
+ HVX_Vector vb = *(HVX_Vector *) src1_curr;
269
+ src1_curr += VLEN;
270
+
271
+ HVX_Vector vc = *(HVX_Vector *) src2_curr;
272
+ src2_curr += VLEN;
273
+
274
+ HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(va, vb);
275
+ HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v1), vc);
276
+
277
+ *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v2);
278
+ dst_curr += VLEN;
279
+ }
280
+ if (remaining > 0) {
281
+ HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr);
282
+ HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v1), *(HVX_Vector *) src2_curr);
283
+ hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v2));
284
+ }
285
+ }
286
+
287
+ void hvx_add_f32(const uint8_t * restrict src0,
288
+ const uint8_t * restrict src1,
289
+ uint8_t * restrict dst,
290
+ const int num_elems) {
291
+ int left_over = num_elems & (VLEN_FP32 - 1);
292
+ int num_elems_whole = num_elems - left_over;
293
+
294
+ int unaligned_addr = 0;
295
+ int unaligned_loop = 0;
296
+ if ((0 == htp_is_aligned((void *) src0, VLEN)) || (0 == htp_is_aligned((void *) src1, VLEN)) ||
297
+ (0 == htp_is_aligned((void *) dst, VLEN))) {
298
+ FARF(HIGH, "hvx_add_f32: unaligned address in hvx op, possibly slower execution\n");
299
+ unaligned_addr = 1;
300
+ }
301
+
302
+ if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
303
+ unaligned_loop = 1;
304
+ FARF(HIGH, "hvx_add_f32: unaligned loop in hvx op, possibly slower execution\n");
305
+ }
306
+
307
+ if (0 == unaligned_loop) {
308
+ HVX_Vector * restrict vec_in1 = (HVX_Vector *) src0;
309
+ HVX_Vector * restrict vec_in2 = (HVX_Vector *) src1;
310
+ HVX_Vector * restrict vec_out = (HVX_Vector *) dst;
311
+
312
+ #pragma unroll(4)
313
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
314
+ HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(*vec_in1++, *vec_in2++);
315
+ *vec_out++ = Q6_Vsf_equals_Vqf32(v);
316
+ }
317
+ } else {
318
+ #pragma unroll(4)
319
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
320
+ HVX_Vector in1 = *(HVX_UVector *) (src0 + i * SIZEOF_FP32);
321
+ HVX_Vector in2 = *(HVX_UVector *) (src1 + i * SIZEOF_FP32);
322
+
323
+ HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in1, in2);
324
+
325
+ *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out);
326
+ }
327
+ }
328
+
329
+ if (left_over > 0) {
330
+ const float * src0f = (const float *) src0 + num_elems_whole;
331
+ const float * src1f = (const float *) src1 + num_elems_whole;
332
+ float * dstf = (float *) dst + num_elems_whole;
333
+
334
+ HVX_Vector in1 = *(HVX_UVector *) src0f;
335
+ HVX_Vector in2 = *(HVX_UVector *) src1f;
336
+
337
+ HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in1, in2);
338
+ hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out));
339
+ }
340
+ }
341
+
342
+ void hvx_add_f32_opt(const uint8_t * restrict src0,
343
+ const uint8_t * restrict src1,
344
+ uint8_t * restrict dst,
345
+ const int num_elems) {
346
+ htp_binary_ops_preamble;
347
+
348
+ for (int i = 0; i < step_of_4; i++) {
349
+ HVX_Vector v1a = *(HVX_Vector *) src0_curr;
350
+
351
+ HVX_Vector v1b = *(HVX_Vector *) src1_curr;
352
+
353
+ HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN);
354
+
355
+ HVX_Vector v1 = Q6_Vqf32_vadd_VsfVsf(v1a, v1b);
356
+
357
+ HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN);
358
+
359
+ HVX_Vector v3a = *(HVX_Vector *) (src0_curr + 2 * VLEN);
360
+
361
+ HVX_Vector v2 = Q6_Vqf32_vadd_VsfVsf(v2a, v2b);
362
+
363
+ *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1);
364
+
365
+ HVX_Vector v3b = *(HVX_Vector *) (src1_curr + 2 * VLEN);
366
+
367
+ HVX_Vector v4a = *(HVX_Vector *) (src0_curr + 3 * VLEN);
368
+
369
+ src0_curr += 4 * VLEN;
370
+
371
+ HVX_Vector v3 = Q6_Vqf32_vadd_VsfVsf(v3a, v3b);
372
+
373
+ *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2);
374
+
375
+ HVX_Vector v4b = *(HVX_Vector *) (src1_curr + 3 * VLEN);
376
+
377
+ *(HVX_Vector *) (dst_curr + 2 * VLEN) = Q6_Vsf_equals_Vqf32(v3);
378
+
379
+ HVX_Vector v4 = Q6_Vqf32_vadd_VsfVsf(v4a, v4b);
380
+
381
+ src1_curr += 4 * VLEN;
382
+
383
+ *(HVX_Vector *) (dst_curr + 3 * VLEN) = Q6_Vsf_equals_Vqf32(v4);
384
+
385
+ dst_curr += 4 * VLEN;
386
+ }
387
+ for (int i = 0; i < step_of_2; i++) {
388
+ HVX_Vector v1a = *(HVX_Vector *) src0_curr;
389
+
390
+ HVX_Vector v1b = *(HVX_Vector *) src1_curr;
391
+
392
+ HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN);
393
+
394
+ HVX_Vector v1 = Q6_Vqf32_vadd_VsfVsf(v1a, v1b);
395
+
396
+ HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN);
397
+
398
+ *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1);
399
+
400
+ src0_curr += 2 * VLEN;
401
+
402
+ HVX_Vector v2 = Q6_Vqf32_vadd_VsfVsf(v2a, v2b);
403
+
404
+ src1_curr += 2 * VLEN;
405
+
406
+ *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2);
407
+
408
+ dst_curr += 2 * VLEN;
409
+ }
410
+ for (int i = 0; i < step_of_1; i++) {
411
+ HVX_Vector va = *(HVX_Vector *) src0_curr;
412
+
413
+ src0_curr += VLEN;
414
+
415
+ HVX_Vector vb = *(HVX_Vector *) src1_curr;
416
+
417
+ src1_curr += VLEN;
418
+
419
+ HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(va, vb);
420
+
421
+ *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v);
422
+
423
+ dst_curr += VLEN;
424
+ }
425
+ if (remaining > 0) {
426
+ HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr);
427
+ hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v));
428
+ }
429
+ }
430
+
431
+ void hvx_add_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) {
432
+ size_t left_over = num_elems & (VLEN_FP32 - 1);
433
+ size_t num_elems_whole = num_elems - left_over;
434
+
435
+ int unaligned_addr = 0;
436
+ int unaligned_loop = 0;
437
+ if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) {
438
+ FARF(HIGH, "hvx_add_scalar_f32: unaligned address in hvx op, possibly slower execution\n");
439
+ unaligned_addr = 1;
440
+ }
441
+
442
+ if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
443
+ unaligned_loop = 1;
444
+ FARF(HIGH, "hvx_add_scalar_f32: unaligned loop in hvx op, possibly slower execution\n");
445
+ }
446
+
447
+ static const float kInf = INFINITY;
448
+ const HVX_Vector inf = hvx_vec_splat_fp32(kInf);
449
+ HVX_Vector val_vec = hvx_vec_splat_fp32(val);
450
+
451
+ if (0 == unaligned_loop) {
452
+ HVX_Vector * restrict vec_in1 = (HVX_Vector *) src;
453
+ HVX_Vector * restrict vec_out = (HVX_Vector *) dst;
454
+
455
+ #pragma unroll(4)
456
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
457
+ HVX_Vector in = *vec_in1++;
458
+ const HVX_VectorPred pred_inf = Q6_Q_vcmp_eq_VwVw(inf, in);
459
+ HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(in, val_vec);
460
+ v = Q6_Vsf_equals_Vqf32(v);
461
+ v = Q6_V_vmux_QVV(pred_inf, inf, v);
462
+ *vec_out++ = v;
463
+ }
464
+ } else {
465
+ #pragma unroll(4)
466
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
467
+ HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32);
468
+
469
+ const HVX_VectorPred pred_inf = Q6_Q_vcmp_eq_VwVw(inf, in);
470
+ HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in, val_vec);
471
+ out = Q6_Vsf_equals_Vqf32(out);
472
+ out = Q6_V_vmux_QVV(pred_inf, inf, out);
473
+
474
+ *(HVX_UVector *) (dst + i * SIZEOF_FP32) = out;
475
+ }
476
+ }
477
+
478
+ if (left_over > 0) {
479
+ const float * srcf = (const float *) src + num_elems_whole;
480
+ float * dstf = (float *) dst + num_elems_whole;
481
+
482
+ HVX_Vector in = *(HVX_UVector *) srcf;
483
+
484
+ const HVX_VectorPred pred_inf = Q6_Q_vcmp_eq_VwVw(inf, in);
485
+ HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in, val_vec);
486
+ out = Q6_Vsf_equals_Vqf32(out);
487
+ out = Q6_V_vmux_QVV(pred_inf, inf, out);
488
+
489
+ hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, out);
490
+ }
491
+ }
492
+
493
+ void hvx_mul_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) {
494
+ size_t left_over = num_elems & (VLEN_FP32 - 1);
495
+ size_t num_elems_whole = num_elems - left_over;
496
+
497
+ int unaligned_addr = 0;
498
+ int unaligned_loop = 0;
499
+ if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) {
500
+ FARF(HIGH, "hvx_mul_scalar_f32: unaligned address in hvx op, possibly slower execution\n");
501
+ unaligned_addr = 1;
502
+ }
503
+
504
+ if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
505
+ unaligned_loop = 1;
506
+ FARF(HIGH, "hvx_mul_scalar_f32: unaligned loop in hvx op, possibly slower execution\n");
507
+ }
508
+
509
+ HVX_Vector val_vec = hvx_vec_splat_fp32(val);
510
+ bool handled_leftover = false;
511
+ if (0 == unaligned_loop) {
512
+ HVX_Vector * restrict vec_in1 = (HVX_Vector *) src;
513
+ HVX_Vector * restrict vec_out = (HVX_Vector *) dst;
514
+
515
+ #pragma unroll(4)
516
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
517
+ HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*vec_in1++, val_vec);
518
+ *vec_out++ = Q6_Vsf_equals_Vqf32(v);
519
+ }
520
+ } else {
521
+ int step_of_1 = num_elems >> 5; // divby 32, because 32 float = 128 bytes per HVX vector
522
+ int leftover_size = left_over * sizeof(float);
523
+
524
+ HVX_Vector * input_v_ptr = (HVX_Vector *) src;
525
+ HVX_UVector * output_v_ptr = (HVX_UVector *) dst;
526
+
527
+ HVX_Vector slinep;
528
+ HVX_Vector slinec;
529
+ HVX_Vector sline;
530
+
531
+ slinep = *input_v_ptr++;
532
+
533
+ #pragma unroll(4)
534
+ for (int i = step_of_1 - 1; i > 0; i--) {
535
+ slinec = *input_v_ptr++;
536
+ sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src);
537
+ *((HVX_UVector *) (output_v_ptr++)) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline, val_vec));
538
+ /* Prepare slinep for next iteration */
539
+ slinep = slinec;
540
+ }
541
+
542
+ if (step_of_1 > 0) {
543
+ slinec = htp_is_aligned(input_v_ptr, VLEN) && left_over == 0 ? slinep : *input_v_ptr++;
544
+ sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src);
545
+ *((HVX_UVector *) (output_v_ptr++)) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline, val_vec));
546
+
547
+ slinep = slinec;
548
+ }
549
+
550
+ if (leftover_size > 0) {
551
+ slinec = (is_in_one_chunk(input_v_ptr, leftover_size, VLEN) ? slinep : *input_v_ptr++);
552
+
553
+ sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src);
554
+
555
+ HVX_Vector sout = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline, val_vec));
556
+ hvx_vec_store_u(output_v_ptr, leftover_size, sout);
557
+ handled_leftover = true;
558
+ }
559
+ }
560
+
561
+ if (left_over > 0 && !handled_leftover) {
562
+ const float * srcf = (const float *) src + num_elems_whole;
563
+ float * dstf = (float *) dst + num_elems_whole;
564
+
565
+ HVX_Vector in = *(HVX_UVector *) srcf;
566
+
567
+ HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(in, val_vec);
568
+ hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out));
569
+ }
570
+ }
571
+
572
+ void hvx_sub_f32(const uint8_t * restrict src0,
573
+ const uint8_t * restrict src1,
574
+ uint8_t * restrict dst,
575
+ const int num_elems) {
576
+ size_t left_over = num_elems & (VLEN_FP32 - 1);
577
+ size_t num_elems_whole = num_elems - left_over;
578
+
579
+ int unaligned_addr = 0;
580
+ int unaligned_loop = 0;
581
+ if ((0 == htp_is_aligned((void *) src0, VLEN)) || (0 == htp_is_aligned((void *) src1, VLEN)) ||
582
+ (0 == htp_is_aligned((void *) dst, VLEN))) {
583
+ FARF(HIGH, "hvx_sub_f32: unaligned address in hvx op, possibly slower execution\n");
584
+ unaligned_addr = 1;
585
+ }
586
+
587
+ if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
588
+ unaligned_loop = 1;
589
+ FARF(HIGH, "hvx_sub_f32: unaligned loop in hvx op, possibly slower execution\n");
590
+ }
591
+
592
+ if (0 == unaligned_loop) {
593
+ HVX_Vector * restrict vec_in1 = (HVX_Vector *) src0;
594
+ HVX_Vector * restrict vec_in2 = (HVX_Vector *) src1;
595
+ HVX_Vector * restrict vec_out = (HVX_Vector *) dst;
596
+
597
+ #pragma unroll(4)
598
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
599
+ HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(*vec_in1++, *vec_in2++);
600
+ *vec_out++ = Q6_Vsf_equals_Vqf32(v);
601
+ }
602
+ } else {
603
+ #pragma unroll(4)
604
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
605
+ HVX_Vector in1 = *(HVX_UVector *) (src0 + i * SIZEOF_FP32);
606
+ HVX_Vector in2 = *(HVX_UVector *) (src1 + i * SIZEOF_FP32);
607
+
608
+ HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in1, in2);
609
+
610
+ *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out);
611
+ }
612
+ }
613
+
614
+ if (left_over > 0) {
615
+ const float * src0f = (const float *) src0 + num_elems_whole;
616
+ const float * src1f = (const float *) src1 + num_elems_whole;
617
+ float * dstf = (float *) dst + num_elems_whole;
618
+
619
+ HVX_Vector in1 = *(HVX_UVector *) src0f;
620
+ HVX_Vector in2 = *(HVX_UVector *) src1f;
621
+
622
+ HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in1, in2);
623
+ hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out));
624
+ }
625
+ }
626
+
627
// Optimized elementwise fp32 subtraction: dst[i] = src0[i] - src1[i].
// NOTE(review): unlike hvx_sub_f32, this variant does no alignment check;
// the aligned HVX_Vector loads/stores below assume all three pointers are
// VLEN-aligned — confirm at call sites.
//
// htp_binary_ops_preamble (defined elsewhere) is expected to set up the
// cursors src0_curr/src1_curr/dst_curr and the trip counts step_of_4,
// step_of_2, step_of_1 and remaining.
void hvx_sub_f32_opt(const uint8_t * restrict src0,
                     const uint8_t * restrict src1,
                     uint8_t * restrict dst,
                     const int num_elems) {
    htp_binary_ops_preamble;

    // 4x-unrolled main loop. Loads, subtracts, stores and pointer bumps are
    // hand-interleaved to software-pipeline the HVX issue slots; do not
    // reorder these statements.
    for (int i = 0; i < step_of_4; i++) {
        HVX_Vector v1a = *(HVX_Vector *) src0_curr;

        HVX_Vector v1b = *(HVX_Vector *) src1_curr;

        HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN);

        HVX_Vector v1 = Q6_Vqf32_vsub_VsfVsf(v1a, v1b);

        HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN);

        HVX_Vector v3a = *(HVX_Vector *) (src0_curr + 2 * VLEN);

        HVX_Vector v2 = Q6_Vqf32_vsub_VsfVsf(v2a, v2b);

        *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1);

        HVX_Vector v3b = *(HVX_Vector *) (src1_curr + 2 * VLEN);

        HVX_Vector v4a = *(HVX_Vector *) (src0_curr + 3 * VLEN);

        src0_curr += 4 * VLEN;

        HVX_Vector v3 = Q6_Vqf32_vsub_VsfVsf(v3a, v3b);

        *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2);

        HVX_Vector v4b = *(HVX_Vector *) (src1_curr + 3 * VLEN);

        *(HVX_Vector *) (dst_curr + 2 * VLEN) = Q6_Vsf_equals_Vqf32(v3);

        HVX_Vector v4 = Q6_Vqf32_vsub_VsfVsf(v4a, v4b);

        src1_curr += 4 * VLEN;

        *(HVX_Vector *) (dst_curr + 3 * VLEN) = Q6_Vsf_equals_Vqf32(v4);

        dst_curr += 4 * VLEN;
    }
    // 2x-unrolled cleanup for up to one remaining pair of whole vectors.
    for (int i = 0; i < step_of_2; i++) {
        HVX_Vector v1a = *(HVX_Vector *) src0_curr;

        HVX_Vector v1b = *(HVX_Vector *) src1_curr;

        HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN);

        HVX_Vector v1 = Q6_Vqf32_vsub_VsfVsf(v1a, v1b);

        HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN);

        *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1);

        src0_curr += 2 * VLEN;

        HVX_Vector v2 = Q6_Vqf32_vsub_VsfVsf(v2a, v2b);

        src1_curr += 2 * VLEN;

        *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2);

        dst_curr += 2 * VLEN;
    }
    // One whole vector at a time.
    for (int i = 0; i < step_of_1; i++) {
        HVX_Vector va = *(HVX_Vector *) src0_curr;

        src0_curr += VLEN;

        HVX_Vector vb = *(HVX_Vector *) src1_curr;

        src1_curr += VLEN;

        HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(va, vb);

        *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v);

        dst_curr += VLEN;
    }
    // Partial final vector: full-vector compute, bounded unaligned store of
    // only the remaining elements.
    if (remaining > 0) {
        HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr);
        hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v));
    }
}
715
+
716
+ void hvx_sub_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) {
717
+ size_t left_over = num_elems & (VLEN_FP32 - 1);
718
+ size_t num_elems_whole = num_elems - left_over;
719
+
720
+ int unaligned_addr = 0;
721
+ int unaligned_loop = 0;
722
+ if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) {
723
+ FARF(HIGH, "hvx_sub_scalar_f32: unaligned address in hvx op, possibly slower execution\n");
724
+ unaligned_addr = 1;
725
+ }
726
+
727
+ if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
728
+ unaligned_loop = 1;
729
+ FARF(HIGH, "hvx_sub_scalar_f32: unaligned loop in hvx op, possibly slower execution\n");
730
+ }
731
+
732
+ HVX_Vector val_vec = hvx_vec_splat_fp32(val);
733
+
734
+ if (0 == unaligned_loop) {
735
+ HVX_Vector * restrict vec_in1 = (HVX_Vector *) src;
736
+ HVX_Vector * restrict vec_out = (HVX_Vector *) dst;
737
+
738
+ #pragma unroll(4)
739
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
740
+ HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(*vec_in1++, val_vec);
741
+ *vec_out++ = Q6_Vsf_equals_Vqf32(v);
742
+ }
743
+ } else {
744
+ #pragma unroll(4)
745
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
746
+ HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32);
747
+
748
+ HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in, val_vec);
749
+
750
+ *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out);
751
+ }
752
+ }
753
+
754
+ if (left_over > 0) {
755
+ const float * srcf = (const float *) src + num_elems_whole;
756
+ float * dstf = (float *) dst + num_elems_whole;
757
+
758
+ HVX_Vector in = *(HVX_UVector *) srcf;
759
+
760
+ HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in, val_vec);
761
+ hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out));
762
+ }
763
+ }
764
+
765
+ float hvx_sum_of_squares_f32(const uint8_t * restrict src, const int num_elems) {
766
+ int left_over = num_elems & (VLEN_FP32 - 1);
767
+ int num_elems_whole = num_elems - left_over;
768
+
769
+ if (0 == htp_is_aligned((void *) src, VLEN)) {
770
+ FARF(HIGH, "hvx_sum_of_squares_f32: unaligned address in hvx op, possibly slower execution\n");
771
+ }
772
+
773
+ assert((1 == htp_is_aligned((void *) src, VLEN)) || (0 == num_elems_whole));
774
+
775
+ HVX_Vector * restrict vec_in1 = (HVX_Vector *) src;
776
+
777
+ HVX_Vector sum_vec_acc = Q6_V_vsplat_R(0x00000000);
778
+ HVX_Vector zero_vec = Q6_V_vsplat_R(0x00000000);
779
+
780
+ #pragma unroll(4)
781
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
782
+ HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*vec_in1, *vec_in1);
783
+ sum_vec_acc = Q6_Vqf32_vadd_Vqf32Vqf32(sum_vec_acc, v);
784
+ vec_in1++;
785
+ }
786
+
787
+ if (left_over > 0) {
788
+ const float * srcf = (const float *) src + num_elems_whole;
789
+
790
+ HVX_Vector vec_left = *(HVX_UVector *) srcf;
791
+
792
+ HVX_Vector vec_left_sq = Q6_Vqf32_vmpy_VsfVsf(vec_left, vec_left);
793
+ HVX_Vector vec_tmp = Q6_V_valign_VVR(vec_left_sq, zero_vec, left_over * SIZEOF_FP32);
794
+
795
+ sum_vec_acc = Q6_Vqf32_vadd_Vqf32Vqf32(sum_vec_acc, vec_tmp);
796
+ }
797
+
798
+ HVX_Vector v = hvx_vec_qf32_reduce_sum(sum_vec_acc);
799
+ return hvx_vec_get_fp32(Q6_Vsf_equals_Vqf32(v));
800
+ }
801
+
802
+ float hvx_self_sum_f32(const uint8_t * restrict src, const int num_elems) {
803
+ int left_over = num_elems & (VLEN_FP32 - 1);
804
+ int num_elems_whole = num_elems - left_over;
805
+
806
+ int unaligned_addr = 0;
807
+ int unaligned_loop = 0;
808
+ if (0 == htp_is_aligned((void *) src, VLEN)) {
809
+ FARF(HIGH, "hvx_self_sum_f32: unaligned address in hvx op, possibly slower execution\n");
810
+ unaligned_addr = 1;
811
+ }
812
+
813
+ if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
814
+ unaligned_loop = 1;
815
+ FARF(HIGH, "hvx_self_sum_f32: unaligned loop in hvx op, possibly slower execution\n");
816
+ }
817
+
818
+ HVX_Vector sum_vec = Q6_V_vsplat_R(0x00000000);
819
+ HVX_Vector zero_vec = Q6_V_vsplat_R(0x00000000);
820
+
821
+ if (0 == unaligned_loop) {
822
+ HVX_Vector * vec_in = (HVX_Vector *) src;
823
+
824
+ #pragma unroll(4)
825
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
826
+ // sum_vec = Q6_Vqf32_vadd_Vqf32Vsf(sum_vec, *vec_in++);
827
+ sum_vec = Q6_Vqf32_vadd_VsfVsf(Q6_Vsf_equals_Vqf32(sum_vec), *vec_in++);
828
+ }
829
+ } else {
830
+ #pragma unroll(4)
831
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
832
+ HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32);
833
+
834
+ sum_vec = Q6_Vqf32_vadd_VsfVsf(Q6_Vsf_equals_Vqf32(sum_vec), in);
835
+ }
836
+ }
837
+
838
+ if (left_over > 0) {
839
+ const float * srcf = (const float *) src + num_elems_whole;
840
+
841
+ HVX_Vector vec_left = *(HVX_UVector *) srcf;
842
+ HVX_Vector vec_tmp = Q6_V_valign_VVR(vec_left, zero_vec, left_over * SIZEOF_FP32);
843
+ // sum_vec = Q6_Vqf32_vadd_Vqf32Vsf(sum_vec, vec_tmp);
844
+ sum_vec = Q6_Vqf32_vadd_VsfVsf(Q6_Vsf_equals_Vqf32(sum_vec), vec_tmp);
845
+ }
846
+
847
+ HVX_Vector v = hvx_vec_qf32_reduce_sum(sum_vec);
848
+ return hvx_vec_get_fp32(Q6_Vsf_equals_Vqf32(v));
849
+ }
850
+
851
+ float hvx_self_max_f32(const uint8_t * restrict src, const int num_elems) {
852
+ int left_over = num_elems & (VLEN_FP32 - 1);
853
+ int num_elems_whole = num_elems - left_over;
854
+
855
+ int unaligned_addr = 0;
856
+ int unaligned_loop = 0;
857
+ if (0 == htp_is_aligned((void *) src, VLEN)) {
858
+ FARF(HIGH, "hvx_self_max_f32: unaligned address in hvx op, possibly slower execution\n");
859
+ unaligned_addr = 1;
860
+ }
861
+
862
+ if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
863
+ unaligned_loop = 1;
864
+ FARF(HIGH, "hvx_self_max_f32: unaligned loop in hvx op, possibly slower execution\n");
865
+ }
866
+
867
+ HVX_Vector vec_max = hvx_vec_splat_fp32(((const float *) src)[0]);
868
+ HVX_Vector vec_first = hvx_vec_splat_fp32(((const float *) src)[0]);
869
+
870
+ if (0 == unaligned_loop) {
871
+ HVX_Vector * restrict vec_in = (HVX_Vector *) src;
872
+
873
+ #pragma unroll(4)
874
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
875
+ vec_max = Q6_Vsf_vmax_VsfVsf(vec_max, *vec_in++);
876
+ }
877
+ } else {
878
+ #pragma unroll(4)
879
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
880
+ HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32);
881
+
882
+ vec_max = Q6_Vsf_vmax_VsfVsf(vec_max, in);
883
+ }
884
+ }
885
+
886
+ if (left_over > 0) {
887
+ const float * srcf = (const float *) src + num_elems_whole;
888
+
889
+ HVX_Vector in = *(HVX_UVector *) srcf;
890
+
891
+ HVX_Vector temp = Q6_V_valign_VVR(in, vec_first, left_over * SIZEOF_FP32);
892
+ vec_max = Q6_Vsf_vmax_VsfVsf(vec_max, temp);
893
+ }
894
+
895
+ HVX_Vector v = hvx_vec_reduce_max_fp32(vec_max);
896
+ return hvx_vec_get_fp32(v);
897
+ }
898
+
899
+ void hvx_min_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) {
900
+ size_t left_over = num_elems & (VLEN_FP32 - 1);
901
+ size_t num_elems_whole = num_elems - left_over;
902
+ int unalign_address = 0;
903
+ if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) {
904
+ FARF(HIGH, "hvx_min_scalar_f32: unaligned address in hvx op, possibly slower execution\n");
905
+ unalign_address = 1;
906
+ }
907
+
908
+ const float * src_f = (const float *) src;
909
+
910
+ HVX_Vector vec_min = hvx_vec_splat_fp32(val);
911
+
912
+ if(unalign_address == 0){
913
+ HVX_Vector * restrict vec_in = (HVX_Vector *) src;
914
+ HVX_Vector * restrict vec_out = (HVX_Vector *) dst;
915
+
916
+ #pragma unroll(4)
917
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
918
+ HVX_Vector min_clamp = Q6_Vsf_vmin_VsfVsf(vec_min, *vec_in++);
919
+ *vec_out++ = (min_clamp);
920
+ }
921
+ }else{
922
+ HVX_UVector * restrict vec_in = (HVX_Vector *) src;
923
+ HVX_UVector * restrict vec_out = (HVX_Vector *) dst;
924
+
925
+ #pragma unroll(4)
926
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
927
+ HVX_Vector min_clamp = Q6_Vsf_vmin_VsfVsf(vec_min, *vec_in++);
928
+ *vec_out++ = (min_clamp);
929
+ }
930
+ }
931
+
932
+ if (left_over > 0 ) {
933
+ const float * srcf = (const float *) src + num_elems_whole;
934
+ float * dstf = (float *) dst + num_elems_whole;
935
+
936
+ HVX_UVector in = *(HVX_UVector *) srcf;
937
+
938
+ HVX_UVector min_clamp = Q6_Vsf_vmin_VsfVsf(vec_min, in);
939
+
940
+ hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, (min_clamp));
941
+ }
942
+ }
943
+
944
+ void hvx_clamp_scalar_f32(const uint8_t * restrict src,
945
+ const float limit_left,
946
+ const float limit_right,
947
+ uint8_t * restrict dst,
948
+ const int num_elems) {
949
+ size_t left_over = num_elems & (VLEN_FP32 - 1);
950
+ size_t num_elems_whole = num_elems - left_over;
951
+
952
+ int unalign_address = 0;
953
+ if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) {
954
+ FARF(HIGH, "hvx_clamp_scalar_f32: unaligned address in hvx op, possibly slower execution\n");
955
+ unalign_address = 1;
956
+ }
957
+
958
+ HVX_Vector range_left = hvx_vec_splat_fp32(limit_left);
959
+ HVX_Vector range_right = hvx_vec_splat_fp32(limit_right);
960
+
961
+ if(unalign_address == 0){
962
+ HVX_Vector * restrict vec_in = (HVX_Vector *) src;
963
+ HVX_Vector * restrict vec_out = (HVX_Vector *) dst;
964
+
965
+
966
+
967
+ #pragma unroll(4)
968
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
969
+ HVX_Vector in_vec = *vec_in++;
970
+ HVX_Vector temp_v = in_vec;
971
+
972
+ HVX_VectorPred pred_cap_right = Q6_Q_vcmp_gt_VsfVsf(in_vec, range_right);
973
+ HVX_VectorPred pred_cap_left = Q6_Q_vcmp_gt_VsfVsf(range_left, in_vec);
974
+
975
+ in_vec = Q6_V_vmux_QVV(pred_cap_right, range_right, temp_v);
976
+ in_vec = Q6_V_vmux_QVV(pred_cap_left, range_left, in_vec);
977
+
978
+ *vec_out++ = in_vec;
979
+ }
980
+
981
+ }else{
982
+
983
+ HVX_UVector * restrict vec_in = (HVX_UVector *) src;
984
+ HVX_UVector * restrict vec_out = (HVX_UVector *) dst;
985
+
986
+ #pragma unroll(4)
987
+ for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
988
+ HVX_Vector in_vec = *vec_in++;
989
+ HVX_Vector temp_v = in_vec;
990
+
991
+ HVX_VectorPred pred_cap_right = Q6_Q_vcmp_gt_VsfVsf(in_vec, range_right);
992
+ HVX_VectorPred pred_cap_left = Q6_Q_vcmp_gt_VsfVsf(range_left, in_vec);
993
+
994
+ in_vec = Q6_V_vmux_QVV(pred_cap_right, range_right, temp_v);
995
+ in_vec = Q6_V_vmux_QVV(pred_cap_left, range_left, in_vec);
996
+
997
+ *vec_out++ = in_vec;
998
+ }
999
+
1000
+ }
1001
+
1002
+ if (left_over > 0) {
1003
+ const float * srcf = (const float *) src + num_elems_whole;
1004
+ float * dstf = (float *) dst + num_elems_whole;
1005
+
1006
+ HVX_Vector in_vec = *(HVX_UVector *) srcf;
1007
+
1008
+ HVX_Vector temp_v = in_vec;
1009
+
1010
+ HVX_VectorPred pred_cap_right = Q6_Q_vcmp_gt_VsfVsf(in_vec, range_right);
1011
+ HVX_VectorPred pred_cap_left = Q6_Q_vcmp_gt_VsfVsf(range_left, in_vec);
1012
+
1013
+ in_vec = Q6_V_vmux_QVV(pred_cap_right, range_right, temp_v);
1014
+ in_vec = Q6_V_vmux_QVV(pred_cap_left, range_left, in_vec);
1015
+
1016
+ hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, in_vec);
1017
+ }
1018
+ }
1019
+
1020
+