whispercpp 1.3.4 → 1.3.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/LICENSE +1 -1
- data/README.md +158 -44
- data/ext/extconf.rb +3 -2
- data/ext/ruby_whisper.c +34 -6
- data/ext/ruby_whisper.h +67 -0
- data/ext/ruby_whisper_context.c +236 -144
- data/ext/ruby_whisper_context_params.c +163 -0
- data/ext/ruby_whisper_model.c +12 -13
- data/ext/ruby_whisper_params.c +47 -24
- data/ext/ruby_whisper_segment.c +84 -20
- data/ext/ruby_whisper_token.c +371 -0
- data/ext/ruby_whisper_transcribe.cpp +5 -2
- data/ext/ruby_whisper_vad_context.c +122 -0
- data/ext/ruby_whisper_vad_context_detect.cpp +51 -0
- data/ext/ruby_whisper_vad_params.c +0 -1
- data/ext/ruby_whisper_vad_segment.c +138 -0
- data/ext/ruby_whisper_vad_segments.c +105 -0
- data/ext/sources/CMakeLists.txt +4 -1
- data/ext/sources/bindings/javascript/package.json +1 -1
- data/ext/sources/cmake/arm64-apple-clang.cmake +16 -0
- data/ext/sources/cmake/arm64-windows-llvm.cmake +16 -0
- data/ext/sources/cmake/riscv64-spacemit-linux-gnu-gcc.cmake +29 -0
- data/ext/sources/cmake/whisper-config.cmake.in +5 -40
- data/ext/sources/cmake/x64-windows-llvm.cmake +5 -0
- data/ext/sources/examples/addon.node/vad-example.js +2 -2
- data/ext/sources/examples/bench/bench.cpp +23 -18
- data/ext/sources/examples/cli/cli.cpp +129 -112
- data/ext/sources/examples/common-ggml.cpp +2 -0
- data/ext/sources/examples/lsp/CMakeLists.txt +2 -1
- data/ext/sources/examples/miniaudio.h +4507 -2131
- data/ext/sources/examples/quantize/CMakeLists.txt +2 -1
- data/ext/sources/examples/server/server.cpp +28 -15
- data/ext/sources/examples/talk-llama/CMakeLists.txt +8 -3
- data/ext/sources/examples/talk-llama/llama-adapter.cpp +5 -2
- data/ext/sources/examples/talk-llama/llama-adapter.h +7 -0
- data/ext/sources/examples/talk-llama/llama-arch.cpp +2378 -1988
- data/ext/sources/examples/talk-llama/llama-arch.h +109 -2
- data/ext/sources/examples/talk-llama/llama-batch.cpp +78 -34
- data/ext/sources/examples/talk-llama/llama-batch.h +17 -4
- data/ext/sources/examples/talk-llama/llama-chat.cpp +100 -4
- data/ext/sources/examples/talk-llama/llama-chat.h +5 -0
- data/ext/sources/examples/talk-llama/llama-context.cpp +1088 -403
- data/ext/sources/examples/talk-llama/llama-context.h +70 -23
- data/ext/sources/examples/talk-llama/llama-cparams.h +6 -0
- data/ext/sources/examples/talk-llama/llama-ext.h +12 -0
- data/ext/sources/examples/talk-llama/llama-grammar.cpp +295 -60
- data/ext/sources/examples/talk-llama/llama-grammar.h +22 -1
- data/ext/sources/examples/talk-llama/llama-graph.cpp +925 -155
- data/ext/sources/examples/talk-llama/llama-graph.h +234 -23
- data/ext/sources/examples/talk-llama/llama-hparams.cpp +79 -38
- data/ext/sources/examples/talk-llama/llama-hparams.h +118 -18
- data/ext/sources/examples/talk-llama/llama-impl.cpp +11 -7
- data/ext/sources/examples/talk-llama/llama-impl.h +14 -2
- data/ext/sources/examples/talk-llama/llama-kv-cache-iswa.cpp +8 -4
- data/ext/sources/examples/talk-llama/llama-kv-cache.cpp +405 -140
- data/ext/sources/examples/talk-llama/llama-kv-cache.h +24 -10
- data/ext/sources/examples/talk-llama/llama-kv-cells.h +44 -2
- data/ext/sources/examples/talk-llama/llama-memory-hybrid-iswa.cpp +275 -0
- data/ext/sources/examples/talk-llama/llama-memory-hybrid-iswa.h +140 -0
- data/ext/sources/examples/talk-llama/llama-memory-hybrid.cpp +12 -10
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.cpp +42 -31
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.h +2 -2
- data/ext/sources/examples/talk-llama/llama-mmap.cpp +197 -45
- data/ext/sources/examples/talk-llama/llama-mmap.h +8 -3
- data/ext/sources/examples/talk-llama/llama-model-loader.cpp +606 -116
- data/ext/sources/examples/talk-llama/llama-model-loader.h +41 -5
- data/ext/sources/examples/talk-llama/llama-model-saver.cpp +61 -44
- data/ext/sources/examples/talk-llama/llama-model-saver.h +5 -2
- data/ext/sources/examples/talk-llama/llama-model.cpp +2756 -13643
- data/ext/sources/examples/talk-llama/llama-model.h +112 -18
- data/ext/sources/examples/talk-llama/llama-quant.cpp +582 -365
- data/ext/sources/examples/talk-llama/{llama-sampling.cpp → llama-sampler.cpp} +1409 -199
- data/ext/sources/examples/talk-llama/llama-sampler.h +42 -0
- data/ext/sources/examples/talk-llama/llama-vocab.cpp +248 -82
- data/ext/sources/examples/talk-llama/llama-vocab.h +50 -40
- data/ext/sources/examples/talk-llama/llama.cpp +802 -21
- data/ext/sources/examples/talk-llama/llama.h +210 -39
- data/ext/sources/examples/talk-llama/models/afmoe.cpp +190 -0
- data/ext/sources/examples/talk-llama/models/apertus.cpp +125 -0
- data/ext/sources/examples/talk-llama/models/arcee.cpp +135 -0
- data/ext/sources/examples/talk-llama/models/arctic.cpp +137 -0
- data/ext/sources/examples/talk-llama/models/arwkv7.cpp +86 -0
- data/ext/sources/examples/talk-llama/models/baichuan.cpp +123 -0
- data/ext/sources/examples/talk-llama/models/bailingmoe.cpp +143 -0
- data/ext/sources/examples/talk-llama/models/bailingmoe2.cpp +133 -0
- data/ext/sources/examples/talk-llama/models/bert.cpp +184 -0
- data/ext/sources/examples/talk-llama/models/bitnet.cpp +145 -0
- data/ext/sources/examples/talk-llama/models/bloom.cpp +101 -0
- data/ext/sources/examples/talk-llama/models/chameleon.cpp +178 -0
- data/ext/sources/examples/talk-llama/models/chatglm.cpp +132 -0
- data/ext/sources/examples/talk-llama/models/codeshell.cpp +111 -0
- data/ext/sources/examples/talk-llama/models/cogvlm.cpp +102 -0
- data/ext/sources/examples/talk-llama/models/cohere2-iswa.cpp +134 -0
- data/ext/sources/examples/talk-llama/models/command-r.cpp +122 -0
- data/ext/sources/examples/talk-llama/models/dbrx.cpp +122 -0
- data/ext/sources/examples/talk-llama/models/deci.cpp +135 -0
- data/ext/sources/examples/talk-llama/models/deepseek.cpp +142 -0
- data/ext/sources/examples/talk-llama/models/deepseek2.cpp +262 -0
- data/ext/sources/examples/talk-llama/models/delta-net-base.cpp +445 -0
- data/ext/sources/examples/talk-llama/models/dots1.cpp +132 -0
- data/ext/sources/examples/talk-llama/models/dream.cpp +105 -0
- data/ext/sources/examples/talk-llama/models/ernie4-5-moe.cpp +148 -0
- data/ext/sources/examples/talk-llama/models/ernie4-5.cpp +110 -0
- data/ext/sources/examples/talk-llama/models/eurobert.cpp +97 -0
- data/ext/sources/examples/talk-llama/models/exaone-moe.cpp +145 -0
- data/ext/sources/examples/talk-llama/models/exaone.cpp +114 -0
- data/ext/sources/examples/talk-llama/models/exaone4.cpp +123 -0
- data/ext/sources/examples/talk-llama/models/falcon-h1.cpp +111 -0
- data/ext/sources/examples/talk-llama/models/falcon.cpp +120 -0
- data/ext/sources/examples/talk-llama/models/gemma-embedding.cpp +116 -0
- data/ext/sources/examples/talk-llama/models/gemma.cpp +112 -0
- data/ext/sources/examples/talk-llama/models/gemma2-iswa.cpp +128 -0
- data/ext/sources/examples/talk-llama/models/gemma3.cpp +155 -0
- data/ext/sources/examples/talk-llama/models/gemma3n-iswa.cpp +384 -0
- data/ext/sources/examples/talk-llama/models/glm4-moe.cpp +170 -0
- data/ext/sources/examples/talk-llama/models/glm4.cpp +157 -0
- data/ext/sources/examples/talk-llama/models/gpt2.cpp +105 -0
- data/ext/sources/examples/talk-llama/models/gptneox.cpp +144 -0
- data/ext/sources/examples/talk-llama/models/granite-hybrid.cpp +195 -0
- data/ext/sources/examples/talk-llama/models/granite.cpp +210 -0
- data/ext/sources/examples/talk-llama/models/grok.cpp +159 -0
- data/ext/sources/examples/talk-llama/models/grovemoe.cpp +139 -0
- data/ext/sources/examples/talk-llama/models/hunyuan-dense.cpp +132 -0
- data/ext/sources/examples/talk-llama/models/hunyuan-moe.cpp +153 -0
- data/ext/sources/examples/talk-llama/models/internlm2.cpp +120 -0
- data/ext/sources/examples/talk-llama/models/jais.cpp +86 -0
- data/ext/sources/examples/talk-llama/models/jais2.cpp +123 -0
- data/ext/sources/examples/talk-llama/models/jamba.cpp +106 -0
- data/ext/sources/examples/talk-llama/models/kimi-linear.cpp +381 -0
- data/ext/sources/examples/talk-llama/models/lfm2.cpp +196 -0
- data/ext/sources/examples/talk-llama/models/llada-moe.cpp +122 -0
- data/ext/sources/examples/talk-llama/models/llada.cpp +99 -0
- data/ext/sources/examples/talk-llama/models/llama-iswa.cpp +178 -0
- data/ext/sources/examples/talk-llama/models/llama.cpp +175 -0
- data/ext/sources/examples/talk-llama/models/maincoder.cpp +117 -0
- data/ext/sources/examples/talk-llama/models/mamba-base.cpp +289 -0
- data/ext/sources/examples/talk-llama/models/mamba.cpp +54 -0
- data/ext/sources/examples/talk-llama/models/mimo2-iswa.cpp +129 -0
- data/ext/sources/examples/talk-llama/models/minicpm3.cpp +200 -0
- data/ext/sources/examples/talk-llama/models/minimax-m2.cpp +123 -0
- data/ext/sources/examples/talk-llama/models/mistral3.cpp +160 -0
- data/ext/sources/examples/talk-llama/models/models.h +704 -0
- data/ext/sources/examples/talk-llama/models/modern-bert.cpp +109 -0
- data/ext/sources/examples/talk-llama/models/mpt.cpp +126 -0
- data/ext/sources/examples/talk-llama/models/nemotron-h.cpp +162 -0
- data/ext/sources/examples/talk-llama/models/nemotron.cpp +122 -0
- data/ext/sources/examples/talk-llama/models/neo-bert.cpp +104 -0
- data/ext/sources/examples/talk-llama/models/olmo.cpp +121 -0
- data/ext/sources/examples/talk-llama/models/olmo2.cpp +150 -0
- data/ext/sources/examples/talk-llama/models/olmoe.cpp +124 -0
- data/ext/sources/examples/talk-llama/models/openai-moe-iswa.cpp +127 -0
- data/ext/sources/examples/talk-llama/models/openelm.cpp +124 -0
- data/ext/sources/examples/talk-llama/models/orion.cpp +123 -0
- data/ext/sources/examples/talk-llama/models/paddleocr.cpp +122 -0
- data/ext/sources/examples/talk-llama/models/pangu-embedded.cpp +121 -0
- data/ext/sources/examples/talk-llama/models/phi2.cpp +121 -0
- data/ext/sources/examples/talk-llama/models/phi3.cpp +152 -0
- data/ext/sources/examples/talk-llama/models/plamo.cpp +110 -0
- data/ext/sources/examples/talk-llama/models/plamo2.cpp +320 -0
- data/ext/sources/examples/talk-llama/models/plamo3.cpp +128 -0
- data/ext/sources/examples/talk-llama/models/plm.cpp +169 -0
- data/ext/sources/examples/talk-llama/models/qwen.cpp +108 -0
- data/ext/sources/examples/talk-llama/models/qwen2.cpp +126 -0
- data/ext/sources/examples/talk-llama/models/qwen2moe.cpp +151 -0
- data/ext/sources/examples/talk-llama/models/qwen2vl.cpp +117 -0
- data/ext/sources/examples/talk-llama/models/qwen3.cpp +120 -0
- data/ext/sources/examples/talk-llama/models/qwen35.cpp +381 -0
- data/ext/sources/examples/talk-llama/models/qwen35moe.cpp +422 -0
- data/ext/sources/examples/talk-llama/models/qwen3moe.cpp +131 -0
- data/ext/sources/examples/talk-llama/models/qwen3next.cpp +525 -0
- data/ext/sources/examples/talk-llama/models/qwen3vl-moe.cpp +140 -0
- data/ext/sources/examples/talk-llama/models/qwen3vl.cpp +132 -0
- data/ext/sources/examples/talk-llama/models/refact.cpp +94 -0
- data/ext/sources/examples/talk-llama/models/rnd1.cpp +126 -0
- data/ext/sources/examples/talk-llama/models/rwkv6-base.cpp +164 -0
- data/ext/sources/examples/talk-llama/models/rwkv6.cpp +94 -0
- data/ext/sources/examples/talk-llama/models/rwkv6qwen2.cpp +86 -0
- data/ext/sources/examples/talk-llama/models/rwkv7-base.cpp +137 -0
- data/ext/sources/examples/talk-llama/models/rwkv7.cpp +90 -0
- data/ext/sources/examples/talk-llama/models/seed-oss.cpp +124 -0
- data/ext/sources/examples/talk-llama/models/smallthinker.cpp +126 -0
- data/ext/sources/examples/talk-llama/models/smollm3.cpp +128 -0
- data/ext/sources/examples/talk-llama/models/stablelm.cpp +146 -0
- data/ext/sources/examples/talk-llama/models/starcoder.cpp +100 -0
- data/ext/sources/examples/talk-llama/models/starcoder2.cpp +121 -0
- data/ext/sources/examples/talk-llama/models/step35-iswa.cpp +165 -0
- data/ext/sources/examples/talk-llama/models/t5-dec.cpp +166 -0
- data/ext/sources/examples/talk-llama/models/t5-enc.cpp +96 -0
- data/ext/sources/examples/talk-llama/models/wavtokenizer-dec.cpp +149 -0
- data/ext/sources/examples/talk-llama/models/xverse.cpp +108 -0
- data/ext/sources/examples/talk-llama/unicode.cpp +121 -79
- data/ext/sources/examples/vad-speech-segments/CMakeLists.txt +1 -1
- data/ext/sources/examples/whisper.wasm/index-tmpl.html +1 -1
- data/ext/sources/ggml/CMakeLists.txt +90 -56
- data/ext/sources/ggml/include/ggml-alloc.h +9 -0
- data/ext/sources/ggml/include/ggml-backend.h +5 -2
- data/ext/sources/ggml/include/ggml-cann.h +1 -1
- data/ext/sources/ggml/include/ggml-cpu.h +6 -0
- data/ext/sources/ggml/include/ggml-hexagon.h +19 -0
- data/ext/sources/ggml/include/ggml-openvino.h +37 -0
- data/ext/sources/ggml/include/ggml-opt.h +1 -1
- data/ext/sources/ggml/include/ggml-rpc.h +14 -12
- data/ext/sources/ggml/include/ggml-virtgpu.h +14 -0
- data/ext/sources/ggml/include/ggml-zendnn.h +22 -0
- data/ext/sources/ggml/include/ggml.h +246 -21
- data/ext/sources/ggml/src/CMakeLists.txt +85 -11
- data/ext/sources/ggml/src/ggml-alloc.c +128 -50
- data/ext/sources/ggml/src/ggml-backend-dl.cpp +48 -0
- data/ext/sources/ggml/src/ggml-backend-dl.h +45 -0
- data/ext/sources/ggml/src/ggml-backend-impl.h +1 -4
- data/ext/sources/ggml/src/ggml-backend-reg.cpp +54 -88
- data/ext/sources/ggml/src/ggml-backend.cpp +76 -23
- data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +18 -4
- data/ext/sources/ggml/src/ggml-blas/ggml-blas.cpp +11 -11
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.cpp +58 -46
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.h +139 -48
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.cpp +2427 -1785
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +238 -362
- data/ext/sources/ggml/src/ggml-cann/common.h +285 -211
- data/ext/sources/ggml/src/ggml-cann/ggml-cann.cpp +663 -831
- data/ext/sources/ggml/src/ggml-common.h +11 -0
- data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +170 -95
- data/ext/sources/ggml/src/ggml-cpu/amx/amx.cpp +42 -18
- data/ext/sources/ggml/src/ggml-cpu/amx/common.h +34 -10
- data/ext/sources/ggml/src/ggml-cpu/amx/mmq.cpp +85 -85
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp +4 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/quants.c +513 -27
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/repack.cpp +4192 -992
- data/ext/sources/ggml/src/ggml-cpu/arch/loongarch/quants.c +4 -5
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/cpu-feats.cpp +38 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/quants.c +1761 -49
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/repack.cpp +1391 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/s390/cpu-feats.cpp +50 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/s390/quants.c +8 -10
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/quants.c +9 -9
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/repack.cpp +124 -24
- data/ext/sources/ggml/src/ggml-cpu/arch-fallback.h +157 -28
- data/ext/sources/ggml/src/ggml-cpu/binary-ops.cpp +2 -6
- data/ext/sources/ggml/src/ggml-cpu/common.h +8 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-impl.h +8 -3
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +251 -80
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.cpp +19 -0
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +587 -119
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.h +33 -44
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +1093 -194
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +1284 -203
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.h +6 -0
- data/ext/sources/ggml/src/ggml-cpu/ops.cpp +1519 -527
- data/ext/sources/ggml/src/ggml-cpu/ops.h +6 -4
- data/ext/sources/ggml/src/ggml-cpu/quants.c +40 -0
- data/ext/sources/ggml/src/ggml-cpu/quants.h +3 -0
- data/ext/sources/ggml/src/ggml-cpu/repack.cpp +3632 -781
- data/ext/sources/ggml/src/ggml-cpu/repack.h +129 -4
- data/ext/sources/ggml/src/ggml-cpu/simd-gemm.h +136 -0
- data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +152 -46
- data/ext/sources/ggml/src/ggml-cpu/spacemit/ime.cpp +3 -2
- data/ext/sources/ggml/src/ggml-cpu/unary-ops.cpp +152 -1
- data/ext/sources/ggml/src/ggml-cpu/unary-ops.h +7 -0
- data/ext/sources/ggml/src/ggml-cpu/vec.cpp +140 -0
- data/ext/sources/ggml/src/ggml-cpu/vec.h +261 -146
- data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +72 -1
- data/ext/sources/ggml/src/ggml-cuda/argmax.cu +2 -2
- data/ext/sources/ggml/src/ggml-cuda/argsort.cu +132 -6
- data/ext/sources/ggml/src/ggml-cuda/argsort.cuh +16 -0
- data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +33 -31
- data/ext/sources/ggml/src/ggml-cuda/common.cuh +474 -85
- data/ext/sources/ggml/src/ggml-cuda/convert.cu +41 -27
- data/ext/sources/ggml/src/ggml-cuda/convert.cuh +10 -0
- data/ext/sources/ggml/src/ggml-cuda/cpy-utils.cuh +1 -1
- data/ext/sources/ggml/src/ggml-cuda/cpy.cu +342 -246
- data/ext/sources/ggml/src/ggml-cuda/cpy.cuh +1 -5
- data/ext/sources/ggml/src/ggml-cuda/cumsum.cu +307 -0
- data/ext/sources/ggml/src/ggml-cuda/cumsum.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/diag.cu +77 -0
- data/ext/sources/ggml/src/ggml-cuda/diag.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +98 -74
- data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +973 -665
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cu +35 -741
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cuh +1255 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec.cuh +33 -40
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +40 -18
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +48 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn.cu +206 -45
- data/ext/sources/ggml/src/ggml-cuda/fill.cu +37 -0
- data/ext/sources/ggml/src/ggml-cuda/fill.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/gated_delta_net.cu +263 -0
- data/ext/sources/ggml/src/ggml-cuda/gated_delta_net.cuh +4 -0
- data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +1688 -302
- data/ext/sources/ggml/src/ggml-cuda/mean.cu +12 -10
- data/ext/sources/ggml/src/ggml-cuda/mma.cuh +908 -48
- data/ext/sources/ggml/src/ggml-cuda/mmf.cu +88 -20
- data/ext/sources/ggml/src/ggml-cuda/mmf.cuh +502 -90
- data/ext/sources/ggml/src/ggml-cuda/mmid.cu +164 -0
- data/ext/sources/ggml/src/ggml-cuda/mmid.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/mmq.cu +69 -176
- data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +532 -193
- data/ext/sources/ggml/src/ggml-cuda/mmvf.cu +460 -104
- data/ext/sources/ggml/src/ggml-cuda/mmvf.cuh +5 -2
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +360 -122
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cuh +2 -1
- data/ext/sources/ggml/src/ggml-cuda/norm.cu +18 -76
- data/ext/sources/ggml/src/ggml-cuda/pad.cu +73 -39
- data/ext/sources/ggml/src/ggml-cuda/quantize.cu +152 -1
- data/ext/sources/ggml/src/ggml-cuda/quantize.cuh +14 -0
- data/ext/sources/ggml/src/ggml-cuda/reduce_rows.cuh +2 -16
- data/ext/sources/ggml/src/ggml-cuda/rope.cu +364 -149
- data/ext/sources/ggml/src/ggml-cuda/rope.cuh +2 -0
- data/ext/sources/ggml/src/ggml-cuda/set-rows.cu +101 -47
- data/ext/sources/ggml/src/ggml-cuda/set.cu +39 -0
- data/ext/sources/ggml/src/ggml-cuda/set.cuh +7 -0
- data/ext/sources/ggml/src/ggml-cuda/softmax.cu +163 -41
- data/ext/sources/ggml/src/ggml-cuda/solve_tri.cu +275 -0
- data/ext/sources/ggml/src/ggml-cuda/solve_tri.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +68 -50
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cuh +1 -1
- data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cu +49 -84
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_32.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_32.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq112-dv112.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq128-dv128.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq256-dv256.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq40-dv40.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq576-dv512.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq64-dv64.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq72-dv72.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq80-dv80.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq96-dv96.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +22 -4
- data/ext/sources/ggml/src/ggml-cuda/top-k.cu +95 -0
- data/ext/sources/ggml/src/ggml-cuda/top-k.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cu +275 -119
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cuh +20 -7
- data/ext/sources/ggml/src/ggml-cuda/tri.cu +136 -0
- data/ext/sources/ggml/src/ggml-cuda/tri.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/unary.cu +160 -11
- data/ext/sources/ggml/src/ggml-cuda/unary.cuh +38 -0
- data/ext/sources/ggml/src/ggml-cuda/upscale.cu +163 -7
- data/ext/sources/ggml/src/ggml-cuda/vecdotq.cuh +31 -17
- data/ext/sources/ggml/src/ggml-cuda/vendors/cuda.h +4 -0
- data/ext/sources/ggml/src/ggml-cuda/vendors/hip.h +22 -1
- data/ext/sources/ggml/src/ggml-cuda/vendors/musa.h +6 -0
- data/ext/sources/ggml/src/ggml-hexagon/CMakeLists.txt +117 -0
- data/ext/sources/ggml/src/ggml-hexagon/ggml-hexagon.cpp +3325 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/CMakeLists.txt +46 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/act-ops.c +813 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/argsort-ops.c +281 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/binary-ops.c +891 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/cmake-toolchain.cmake +157 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/cpy-ops.c +252 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/flash-attn-ops.c +713 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/get-rows-ops.c +112 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-dma.c +63 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-dma.h +182 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-dump.h +77 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-fastdiv.h +37 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-utils.h +51 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-ctx.h +35 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-msg.h +155 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-ops.h +63 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp_iface.idl +16 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-arith.h +443 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-base.h +240 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-copy.h +245 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-div.h +251 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-dump.h +129 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-exp.h +215 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-floor.h +100 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-inverse.h +210 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-reduce.h +296 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-scale.h +133 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sigmoid.h +141 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sqrt.h +126 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-types.h +36 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-utils.h +26 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/main.c +1199 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/matmul-ops.c +2670 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/rope-ops.c +497 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/set-rows-ops.c +168 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/softmax-ops.c +419 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/ssm-conv.c +339 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/sum-rows-ops.c +128 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/unary-ops.c +382 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/worker-pool.c +293 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/worker-pool.h +57 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp-drv.cpp +418 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp-drv.h +121 -0
- data/ext/sources/ggml/src/ggml-hexagon/libdl.h +79 -0
- data/ext/sources/ggml/src/ggml-hexagon/libggml-htp.inf +38 -0
- data/ext/sources/ggml/src/ggml-hexagon/op-desc.h +153 -0
- data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +14 -13
- data/ext/sources/ggml/src/ggml-impl.h +129 -6
- data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +10 -10
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-common.cpp +15 -4
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.h +8 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.m +173 -34
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.cpp +912 -344
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.h +124 -59
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.m +588 -144
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +396 -23
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.cpp +1724 -421
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.h +16 -3
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.cpp +333 -114
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.metal +3050 -1539
- data/ext/sources/ggml/src/ggml-musa/CMakeLists.txt +3 -1
- data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +30 -1
- data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +4279 -497
- data/ext/sources/ggml/src/ggml-opencl/kernels/concat.cl +41 -99
- data/ext/sources/ggml/src/ggml-opencl/kernels/cpy.cl +45 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cumsum.cl +139 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +267 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/diag.cl +27 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/exp.cl +125 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/expm1.cl +113 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/fill.cl +17 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/flash_attn_f32.cl +4 -3
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemm_moe_mxfp4_f32.cl +162 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemm_noshuffle_q4_1_f32.cl +132 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_moe_mxfp4_f32.cl +156 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_general_q8_0_f32.cl +195 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_q4_1_f32.cl +283 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/get_rows.cl +36 -12
- data/ext/sources/ggml/src/ggml-opencl/kernels/l2_norm.cl +71 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mean.cl +140 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_f16_f32_kq_kqv.cl +273 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_f16_f32_l4_lm.cl +24 -10
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_f32_f32_l4_lm.cl +24 -10
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q4_0_f32_l4_lm.cl +163 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q4_1_f32_l4_lm.cl +165 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q6_k_f32_l4_lm.cl +158 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q8_0_f32_8x4.cl +129 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q8_0_f32_l4_lm.cl +154 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_1_f32.cl +219 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_1_f32_flat.cl +229 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_k_f32.cl +180 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/{mul_mv_q6_k.cl → mul_mv_q6_k_f32.cl} +4 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q6_k_f32_flat.cl +194 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/neg.cl +125 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/pad.cl +29 -20
- data/ext/sources/ggml/src/ggml-opencl/kernels/repeat.cl +31 -32
- data/ext/sources/ggml/src/ggml-opencl/kernels/rms_norm.cl +25 -10
- data/ext/sources/ggml/src/ggml-opencl/kernels/rope.cl +50 -24
- data/ext/sources/ggml/src/ggml-opencl/kernels/scale.cl +14 -4
- data/ext/sources/ggml/src/ggml-opencl/kernels/set_rows.cl +35 -16
- data/ext/sources/ggml/src/ggml-opencl/kernels/softplus.cl +116 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/solve_tri.cl +51 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sqr.cl +53 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sqrt.cl +53 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/ssm_conv.cl +77 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sum_rows.cl +114 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/tanh.cl +94 -48
- data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +39 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/tri.cl +32 -0
- data/ext/sources/ggml/src/ggml-openvino/.clang-format +154 -0
- data/ext/sources/ggml/src/ggml-openvino/CMakeLists.txt +22 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-decoder.cpp +975 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-decoder.h +294 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino-extra.cpp +373 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino-extra.h +182 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino.cpp +1110 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-quants.cpp +884 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-quants.h +153 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/decoder.h +74 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/frontend.cpp +27 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/frontend.h +23 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/input_model.cpp +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/input_model.h +29 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/node_context.h +112 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/cont.cpp +48 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/cpy.cpp +21 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/flash_attn_ext.cpp +90 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/get_rows.cpp +69 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/glu_geglu.cpp +61 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/glu_swiglu.cpp +62 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/mulmat.cpp +90 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/permute.cpp +102 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/reshape.cpp +83 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/rms_norm.cpp +46 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/rope.cpp +123 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/scale.cpp +41 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/set_rows.cpp +76 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/softmax.cpp +89 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/transpose.cpp +23 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/unary_silu.cpp +27 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/view.cpp +53 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op_table.cpp +46 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op_table.h +39 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/eliminate_zp.cpp +123 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/eliminate_zp.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/fuse_to_sdpa.cpp +60 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/fuse_to_sdpa.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/mark_decompression_convert_constant_folding.h +29 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/squeeze_matmul.cpp +58 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/squeeze_matmul.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/translate_session.cpp +293 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/translate_session.h +28 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/utils.cpp +226 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/utils.h +85 -0
- data/ext/sources/ggml/src/ggml-openvino/utils.cpp +823 -0
- data/ext/sources/ggml/src/ggml-openvino/utils.h +123 -0
- data/ext/sources/ggml/src/ggml-quants.c +96 -5
- data/ext/sources/ggml/src/ggml-quants.h +3 -0
- data/ext/sources/ggml/src/ggml-rpc/ggml-rpc.cpp +438 -156
- data/ext/sources/ggml/src/ggml-sycl/CMakeLists.txt +59 -87
- data/ext/sources/ggml/src/ggml-sycl/add-id.cpp +81 -0
- data/ext/sources/ggml/src/ggml-sycl/add-id.hpp +8 -0
- data/ext/sources/ggml/src/ggml-sycl/backend.hpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +21 -29
- data/ext/sources/ggml/src/ggml-sycl/binbcast.hpp +0 -6
- data/ext/sources/ggml/src/ggml-sycl/common.hpp +427 -20
- data/ext/sources/ggml/src/ggml-sycl/concat.cpp +55 -44
- data/ext/sources/ggml/src/ggml-sycl/convert.cpp +103 -1
- data/ext/sources/ggml/src/ggml-sycl/convert.hpp +22 -1
- data/ext/sources/ggml/src/ggml-sycl/count-equal.cpp +79 -0
- data/ext/sources/ggml/src/ggml-sycl/count-equal.hpp +9 -0
- data/ext/sources/ggml/src/ggml-sycl/cpy.cpp +0 -3
- data/ext/sources/ggml/src/ggml-sycl/dequantize.hpp +18 -0
- data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +867 -50
- data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +401 -358
- data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +12 -2
- data/ext/sources/ggml/src/ggml-sycl/fattn-common.hpp +1179 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-tile.cpp +55 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-tile.hpp +1338 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-vec.hpp +667 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn.cpp +225 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn.hpp +22 -0
- data/ext/sources/ggml/src/ggml-sycl/gated_delta_net.cpp +309 -0
- data/ext/sources/ggml/src/ggml-sycl/gated_delta_net.hpp +8 -0
- data/ext/sources/ggml/src/ggml-sycl/ggml-sycl.cpp +645 -155
- data/ext/sources/ggml/src/ggml-sycl/mmvq.cpp +22 -0
- data/ext/sources/ggml/src/ggml-sycl/norm.cpp +221 -66
- data/ext/sources/ggml/src/ggml-sycl/norm.hpp +2 -0
- data/ext/sources/ggml/src/ggml-sycl/outprod.cpp +3 -3
- data/ext/sources/ggml/src/ggml-sycl/pad.cpp +97 -0
- data/ext/sources/ggml/src/ggml-sycl/pad.hpp +24 -0
- data/ext/sources/ggml/src/ggml-sycl/pad_reflect_1d.cpp +100 -0
- data/ext/sources/ggml/src/ggml-sycl/pad_reflect_1d.hpp +10 -0
- data/ext/sources/ggml/src/ggml-sycl/presets.hpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/quants.hpp +1 -1
- data/ext/sources/ggml/src/ggml-sycl/repeat_back.cpp +76 -0
- data/ext/sources/ggml/src/ggml-sycl/repeat_back.hpp +8 -0
- data/ext/sources/ggml/src/ggml-sycl/roll.cpp +122 -0
- data/ext/sources/ggml/src/ggml-sycl/roll.hpp +20 -0
- data/ext/sources/ggml/src/ggml-sycl/rope.cpp +457 -281
- data/ext/sources/ggml/src/ggml-sycl/rope.hpp +6 -0
- data/ext/sources/ggml/src/ggml-sycl/set.cpp +73 -0
- data/ext/sources/ggml/src/ggml-sycl/set.hpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/softmax.cpp +327 -162
- data/ext/sources/ggml/src/ggml-sycl/softmax.hpp +4 -0
- data/ext/sources/ggml/src/ggml-sycl/ssm_conv.cpp +127 -0
- data/ext/sources/ggml/src/ggml-sycl/ssm_conv.hpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq112-dv112.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq128-dv128.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq256-dv256.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq40-dv40.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq576-dv512.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq64-dv64.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq72-dv72.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq80-dv80.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq96-dv96.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +71 -0
- data/ext/sources/ggml/src/ggml-sycl/wkv.cpp +1 -1
- data/ext/sources/ggml/src/ggml-virtgpu/CMakeLists.txt +70 -0
- data/ext/sources/ggml/src/ggml-virtgpu/apir_cs_ggml-rpc-front.cpp +87 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/CMakeLists.txt +21 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/apir_cs_ggml-rpc-back.cpp +115 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-convert.h +13 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-backend.cpp +102 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-buffer-type.cpp +105 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-buffer.cpp +179 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-device.cpp +148 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.cpp +51 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.gen.h +73 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.h +27 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-virgl-apir.h +32 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend.cpp +144 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/api_remoting.h +95 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_backend.gen.h +94 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_backend.h +50 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs.h +378 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs_ggml.h +232 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs_rpc.h +58 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-buffer-type.cpp +81 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-buffer.cpp +119 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-device.cpp +158 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-reg.cpp +213 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend.cpp +69 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-remoting.h +71 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggmlremoting_functions.yaml +166 -0
- data/ext/sources/ggml/src/ggml-virtgpu/include/apir_hw.h +9 -0
- data/ext/sources/ggml/src/ggml-virtgpu/regenerate_remoting.py +333 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-apir.h +15 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-backend.cpp +58 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-buffer-type.cpp +110 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-buffer.cpp +173 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-device.cpp +192 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-impl.h +36 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward.gen.h +53 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-shm.cpp +98 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-shm.h +23 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-utils.cpp +179 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-utils.h +86 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu.cpp +544 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu.h +117 -0
- data/ext/sources/ggml/src/ggml-vulkan/CMakeLists.txt +39 -19
- data/ext/sources/ggml/src/ggml-vulkan/ggml-vulkan.cpp +5994 -3055
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/abs.comp +21 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +18 -10
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add1.comp +28 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add_id.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/arange.comp +20 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argmax.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp +33 -26
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argsort_large.comp +114 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/ceil.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp +47 -49
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv_transpose_1d.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +4 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_transpose.comp +67 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/count_equal.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/count_experts.comp +51 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cumsum.comp +83 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cumsum_multipass1.comp +60 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cumsum_multipass2.comp +66 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_f32.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{dequant_funcs.comp → dequant_funcs.glsl} +9 -21
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{dequant_funcs_cm2.comp → dequant_funcs_cm2.glsl} +18 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{dequant_head.comp → dequant_head.glsl} +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_m.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_s.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xs.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_xs.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_mxfp4.comp +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_0.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_1.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_0.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_1.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q8_0.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/diag.comp +29 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/diag_mask_inf.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/div.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/elu.comp +27 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/exp.comp +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/fill.comp +19 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +386 -160
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{flash_attn_base.comp → flash_attn_base.glsl} +82 -20
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +400 -174
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +123 -37
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_mask_opt.comp +162 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +10 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/floor.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gated_delta_net.comp +128 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu_erf.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu_quick.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu_erf.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu_quick.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{generic_binary_head.comp → generic_binary_head.glsl} +17 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{generic_head.comp → generic_head.glsl} +2 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{generic_unary_head.comp → generic_unary_head.glsl} +7 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp +4 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{glu_head.comp → glu_head.glsl} +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/hardsigmoid.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/hardswish.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +19 -7
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col_3d.comp +2 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +13 -10
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/leaky_relu.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/log.comp +18 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{mul_mat_vec_base.comp → mul_mat_vec_base.glsl} +77 -29
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iface.glsl +35 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_m.comp +71 -21
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp +41 -25
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_s.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xs.comp +44 -26
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xxs.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_s.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_xxs.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +9 -7
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp +9 -7
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +4 -6
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +4 -6
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +4 -6
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vecq.comp +39 -36
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vecq_funcs.glsl +494 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +88 -105
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +41 -26
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{mul_mm_funcs.comp → mul_mm_funcs.glsl} +69 -59
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_id_funcs.glsl +74 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +92 -230
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.glsl +454 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_shmem_types.glsl +78 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/multi_add.comp +97 -13
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/neg.comp +20 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/norm.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_adamw.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_sgd.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +21 -6
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pool2d.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/quantize_q8_1.comp +10 -10
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/reglu.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/relu.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat_back.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +49 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_back.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_partials.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/roll.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_funcs.glsl +207 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.glsl +20 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +8 -49
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +8 -32
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +8 -32
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_params.glsl +33 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_vision.comp +8 -38
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/round.comp +29 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sgn.comp +21 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sigmoid.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu_back.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_back.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_large1.comp +62 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_large2.comp +79 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_large3.comp +65 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_large_common.glsl +53 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/softplus.comp +23 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/solve_tri.comp +81 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sqrt.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/square.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/ssm_conv.comp +50 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/ssm_scan.comp +124 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/step.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sub.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp +2 -25
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.glsl +25 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/swiglu.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/swiglu_oai.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/topk_argsort.comp +118 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/topk_moe.comp +213 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/topk_nary_search.comp +246 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/tri.comp +43 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/trunc.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{types.comp → types.glsl} +345 -26
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +90 -12
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +384 -180
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/xielu.comp +35 -0
- data/ext/sources/ggml/src/ggml-webgpu/CMakeLists.txt +28 -2
- data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu-shader-lib.hpp +1374 -0
- data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu.cpp +2544 -726
- data/ext/sources/ggml/src/ggml-webgpu/pre_wgsl.hpp +778 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argmax.wgsl +72 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argsort.wgsl +106 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argsort_merge.wgsl +134 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/binary.wgsl +141 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/common_decls.tmpl +65 -72
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/concat.wgsl +75 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cpy.tmpl.wgsl +107 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cumsum.wgsl +66 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/embed_wgsl.py +73 -15
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/flash_attn.wgsl +636 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{get_rows.tmpl.wgsl → get_rows.wgsl} +53 -259
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/glu.tmpl.wgsl +323 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{mul_mat.tmpl.wgsl → mul_mat.wgsl} +72 -261
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_decls.tmpl +766 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_reg_tile.wgsl +147 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_subgroup_matrix.wgsl +196 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_vec.wgsl +480 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/pad.wgsl +86 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/repeat.wgsl +67 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm.wgsl +83 -17
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/rope.tmpl.wgsl +295 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/scale.wgsl +63 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/set_rows.wgsl +40 -12
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/soft_max.tmpl.wgsl +345 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/sum_rows.wgsl +55 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/unary.wgsl +193 -0
- data/ext/sources/ggml/src/ggml-zdnn/ggml-zdnn.cpp +6 -1
- data/ext/sources/ggml/src/ggml-zendnn/CMakeLists.txt +91 -0
- data/ext/sources/ggml/src/ggml-zendnn/ggml-zendnn.cpp +469 -0
- data/ext/sources/ggml/src/ggml.c +590 -64
- data/ext/sources/ggml/src/gguf.cpp +229 -44
- data/ext/sources/include/whisper.h +1 -0
- data/ext/sources/src/CMakeLists.txt +3 -1
- data/ext/sources/src/whisper.cpp +106 -62
- data/ext/sources/tests/CMakeLists.txt +2 -2
- data/ext/sources/tests/test-vad-full.cpp +4 -2
- data/ext/sources/tests/test-vad.cpp +1 -1
- data/extsources.rb +1 -0
- data/lib/whisper/model/uri.rb +17 -18
- data/sig/whisper.rbs +162 -4
- data/test/test_context_params.rb +82 -0
- data/test/test_params.rb +16 -8
- data/test/test_segment.rb +0 -1
- data/test/test_token.rb +81 -0
- data/test/test_vad.rb +1 -1
- data/test/test_vad_context.rb +100 -0
- data/test/test_vad_segment.rb +19 -0
- data/test/test_vad_segments.rb +16 -0
- data/test/test_whisper.rb +27 -0
- data/whispercpp.gemspec +1 -1
- metadata +502 -37
- data/ext/sources/build-xcframework.sh +0 -571
- data/ext/sources/examples/talk-llama/llama-sampling.h +0 -32
- data/ext/sources/ggml/cmake/BuildTypes.cmake +0 -54
- data/ext/sources/ggml/src/ggml-cann/Doxyfile +0 -2579
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.comp +0 -105
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp +0 -55
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/add.tmpl.wgsl +0 -44
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/add_in_place.tmpl.wgsl +0 -41
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/binary_head.tmpl +0 -45
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cpy.wgsl +0 -60
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul.tmpl.wgsl +0 -44
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_in_place.tmpl.wgsl +0 -41
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm_in_place.wgsl +0 -48
- /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{test_bfloat16_support.comp → feature-tests/bfloat16.comp} +0 -0
- /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{test_coopmat_support.comp → feature-tests/coopmat.comp} +0 -0
- /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{test_coopmat2_support.comp → feature-tests/coopmat2.comp} +0 -0
- /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{test_integer_dot_support.comp → feature-tests/integer_dot.comp} +0 -0
- /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{glu_main.comp → glu_main.glsl} +0 -0
- /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{rte.comp → rte.glsl} +0 -0
- /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{utils.comp → utils.glsl} +0 -0
|
@@ -0,0 +1,2670 @@
|
|
|
1
|
+
#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
|
|
2
|
+
#pragma clang diagnostic ignored "-Wunused-function"
|
|
3
|
+
#pragma clang diagnostic ignored "-Wunused-variable"
|
|
4
|
+
#pragma clang diagnostic ignored "-Wunused-but-set-variable"
|
|
5
|
+
|
|
6
|
+
#include <HAP_farf.h>
|
|
7
|
+
#include <HAP_perf.h>
|
|
8
|
+
|
|
9
|
+
#include <math.h>
|
|
10
|
+
#include <string.h>
|
|
11
|
+
|
|
12
|
+
#include "hex-dma.h"
|
|
13
|
+
#include "hvx-utils.h"
|
|
14
|
+
#include "hvx-dump.h"
|
|
15
|
+
|
|
16
|
+
#define GGML_COMMON_DECL_C
|
|
17
|
+
#include "ggml-common.h"
|
|
18
|
+
#include "htp-ctx.h"
|
|
19
|
+
#include "htp-msg.h"
|
|
20
|
+
#include "htp-ops.h"
|
|
21
|
+
|
|
22
|
+
#define MM_SPAD_SRC0_NROWS 16
|
|
23
|
+
#define MM_SPAD_SRC1_NROWS 16
|
|
24
|
+
#define MM_SPAD_DST_NROWS 2
|
|
25
|
+
|
|
26
|
+
// Per-op state for matrix multiplication on the Hexagon Tensor Processor.
// Bundles the vec_dot kernel variants selected for the operand types plus
// values precomputed once per op so worker threads don't re-derive them.
struct htp_matmul_context {
    const char * type;              // kernel/type name string (e.g. for tracing)
    struct htp_ops_context * octx;  // owning ops context (threads, scratchpads, ...)

    // Dot product of one src0 row against one src1 row over n elements;
    // the scalar result is written to *s0.
    void (*vec_dot_1x1)(const int n, float * restrict s0,
                        const void * restrict vx0,
                        const void * restrict vy0);

    // Two src0 rows (vx0, vx1) against one src1 row (vy0).
    // NOTE(review): presumably both results land at s0[0]/s0[1] — confirm
    // against the kernel implementations; only the signature is visible here.
    void (*vec_dot_2x1)(const int n, float * restrict s0,
                        const void * restrict vx0, const void * restrict vx1,
                        const void * restrict vy0);

    // 2x2 tile: two src0 rows against two src1 rows; results for the two
    // src1 rows are written through s0 and s1 respectively.
    void (*vec_dot_2x2)(const int n, float * restrict s0, float * restrict s1,
                        const void * restrict vx0, const void * restrict vx1,
                        const void * restrict vy0, const void * restrict vy1);

    // Precomputed values
    uint32_t src0_nrows_per_thread;  // src0 row chunk assigned to each worker
    uint32_t src1_nrows_per_thread;  // src1 row chunk assigned to each worker

    // fastdiv magic numbers replacing runtime integer division/modulo by the
    // batch/broadcast extents when mapping a flat row index back to indices.
    struct fastdiv_values mm_div_ne12_ne1;
    struct fastdiv_values mm_div_ne1;
    struct fastdiv_values mm_div_r2;
    struct fastdiv_values mm_div_r3;
};
|
|
51
|
+
|
|
52
|
+
// vdelta control to expand first 32 e8m0 values into 32 uint32 elements.
// This is a precomputed control pattern for the HVX vdelta permute network
// (one control byte per output byte lane, 128 lanes per vector); the byte
// values encode per-stage routing, not data. Do not edit by hand — the
// pattern is tied to the 32 x e8m0 -> 32 x uint32 lane mapping.
static const uint8_t __attribute__((aligned(128))) expand_x32_e8m0[128] = {
    0x00, 0x00, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x02, 0x00, 0x08, 0x08, 0x01, 0x02, 0x00, 0x04, 0x04, 0x00, 0x00,
    0x00, 0x11, 0x10, 0x10, 0x10, 0x02, 0x00, 0x04, 0x00, 0x01, 0x02, 0x08, 0x08, 0x08, 0x08, 0x00, 0x00, 0x01, 0x04,
    0x00, 0x00, 0x22, 0x20, 0x20, 0x20, 0x21, 0x22, 0x20, 0x24, 0x04, 0x00, 0x00, 0x00, 0x09, 0x08, 0x00, 0x00, 0x02,
    0x00, 0x04, 0x00, 0x11, 0x12, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x01, 0x04, 0x00, 0x00, 0x02, 0x00, 0x08, 0x08,
    0x01, 0x02, 0x00, 0x04, 0x44, 0x40, 0x40, 0x40, 0x41, 0x40, 0x40, 0x40, 0x42, 0x40, 0x44, 0x40, 0x41, 0x42, 0x48,
    0x48, 0x08, 0x08, 0x00, 0x00, 0x01, 0x04, 0x00, 0x00, 0x12, 0x10, 0x10, 0x10, 0x01, 0x02, 0x00, 0x04, 0x04, 0x00,
    0x00, 0x00, 0x09, 0x08, 0x00, 0x00, 0x22, 0x20, 0x24, 0x20, 0x21, 0x22, 0x20, 0x20,
};
|
|
62
|
+
|
|
63
|
+
// Byte LUT used with Q6_Vb_vlut32_VbVbI to map 4-bit mxfp4 codes to signed
// byte values: codes 0..7 -> {0,1,2,3,4,6,8,12}, codes 8..15 -> the negated
// magnitudes (0xff = -1, ..., 0xf4 = -12). Entries are interleaved with
// zeros (vlut32 consumes byte pairs) and zero-padded to a full HVX vector.
static const uint8_t __attribute__((aligned(VLEN))) kvalues_mxfp4_lut[] = {
    0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 6, 0, 8, 0, 12, 0, 0, 0, 0xff, 0, 0xfe, 0, 0xfd, 0, 0xfc, 0,
    0xfa, 0, 0xf8, 0, 0xf4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
|
|
70
|
+
|
|
71
|
+
// q4x4x2 and q8x4x2 are the flat q4/8_0 formats where all quants are stored first followed by all scales
|
|
72
|
+
|
|
73
|
+
static inline size_t q8x4x2_row_size(uint32_t ne) {
|
|
74
|
+
// ensures perfect alignment of quants and full row
|
|
75
|
+
const uint32_t qk = QK_Q8_0x4x2;
|
|
76
|
+
const uint32_t nb = (ne + qk - 1) / qk;
|
|
77
|
+
return hex_round_up(ne + nb * 8 * sizeof(__fp16), 128);
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
// Load and unpack 8 HVX vectors' worth (1024 values) of q4x4x2 quants.
// Each source byte packs two 4-bit quants: the low nibbles of a 128-byte
// vector form the first 128 elements of a 256-element chunk, the high
// nibbles the second 128. Unsigned nibbles are rebased to signed int4 by
// subtracting 8 (q4_0 convention).
static inline HVX_Vector_x8 hvx_vec_load_q4x4x8_full(const uint8_t * restrict ptr) {
    const HVX_Vector * restrict vptr = (const HVX_Vector *) ptr;

    HVX_Vector v0_1 = vptr[0]; // first 256 elements (128 bytes)
    HVX_Vector v2_3 = vptr[1]; // ...
    HVX_Vector v4_5 = vptr[2]; // ...
    HVX_Vector v6_7 = vptr[3]; // ...

    const HVX_Vector mask_h4 = Q6_Vb_vsplat_R(0x0F);
    const HVX_Vector i8 = Q6_Vb_vsplat_R(8);

    HVX_Vector v0 = Q6_V_vand_VV(v0_1, mask_h4);  // & 0x0F : first 128 elements
    HVX_Vector v1 = Q6_Vub_vlsr_VubR(v0_1, 4);    // >> 4   : second 128 elements
    HVX_Vector v2 = Q6_V_vand_VV(v2_3, mask_h4);  // & 0x0F ...
    HVX_Vector v3 = Q6_Vub_vlsr_VubR(v2_3, 4);    // >> 4
    HVX_Vector v4 = Q6_V_vand_VV(v4_5, mask_h4);  // & 0x0F
    HVX_Vector v5 = Q6_Vub_vlsr_VubR(v4_5, 4);    // >> 4
    HVX_Vector v6 = Q6_V_vand_VV(v6_7, mask_h4);  // & 0x0F
    HVX_Vector v7 = Q6_Vub_vlsr_VubR(v6_7, 4);    // >> 4

    // Convert uint4 to int4 (i.e. x - 8)
    v0 = Q6_Vb_vsub_VbVb(v0, i8);
    v1 = Q6_Vb_vsub_VbVb(v1, i8);
    v2 = Q6_Vb_vsub_VbVb(v2, i8);
    v3 = Q6_Vb_vsub_VbVb(v3, i8);
    v4 = Q6_Vb_vsub_VbVb(v4, i8);
    v5 = Q6_Vb_vsub_VbVb(v5, i8);
    v6 = Q6_Vb_vsub_VbVb(v6, i8);
    v7 = Q6_Vb_vsub_VbVb(v7, i8);

    HVX_Vector_x8 r = { v0, v1, v2, v3, v4, v5, v6, v7 };
    return r;
}
|
|
113
|
+
|
|
114
|
+
// Unpack up to 8 vectors of q4x4x2 quants for a row tail of n elements.
// Full 256-element chunks unpack like the _full variant (low nibbles then
// high nibbles). The trailing partial chunk is stored nibble-interleaved
// (even/odd), so it is unpacked and re-zipped with vshuff.
// NOTE(review): r.v[] entries beyond the last loaded chunk are left
// uninitialized -- callers mask out the corresponding lanes afterwards
// (see the bmask handling in the vec_dot_* leftover paths).
static HVX_Vector_x8 hvx_vec_load_q4x4x8_partial(const uint8_t * restrict ptr, uint32_t n) {
    const HVX_Vector * restrict vptr = (const HVX_Vector *) ptr;

    const uint32_t qk = QK_Q4_0x4x2; // 256
    const uint32_t nb = n / qk;      // full 256-element chunks
    const uint32_t nloe = n % qk;    // leftover elements in the last chunk

    const HVX_Vector mask_h4 = Q6_Vb_vsplat_R(0x0F);
    const HVX_Vector i8 = Q6_Vb_vsplat_R(8);

    HVX_Vector_x8 r;
    uint32_t i = 0;

#pragma unroll(2)
    for (i=0; i < nb; i++) {
        HVX_Vector v = vptr[i];                     // 256 elements (128 bytes)
        HVX_Vector v0 = Q6_V_vand_VV(v, mask_h4);   // & 0x0F : first 128 elements
        HVX_Vector v1 = Q6_Vub_vlsr_VubR(v, 4);     // >> 4   : second 128 elements
        r.v[i*2+0] = Q6_Vb_vsub_VbVb(v0, i8);       // uint4 -> int4 (x - 8)
        r.v[i*2+1] = Q6_Vb_vsub_VbVb(v1, i8);
    }

    if (nloe) {
        HVX_Vector v = vptr[i];                              // 256 elements (128 bytes)
        HVX_Vector v0 = Q6_V_vand_VV(v, mask_h4);            // & 0x0F : even 128 elements
        HVX_Vector v1 = Q6_Vub_vlsr_VubR(v, 4);              // >> 4   : odd 128 elements
        HVX_VectorPair v0_1_p = Q6_W_vshuff_VVR(v1, v0, -1); // zip even:odd:...
        r.v[i*2+0] = Q6_Vb_vsub_VbVb(Q6_V_lo_W(v0_1_p), i8);
        r.v[i*2+1] = Q6_Vb_vsub_VbVb(Q6_V_hi_W(v0_1_p), i8);
    }

    return r;
}
|
|
147
|
+
|
|
148
|
+
// Load and unpack 8 HVX vectors' worth (1024 values) of mxfp4 quants.
// Same nibble layout as the q4 loader, but instead of rebasing by -8 the
// 4-bit codes are mapped to signed byte magnitudes through
// kvalues_mxfp4_lut via vlut32.
static inline HVX_Vector_x8 hvx_vec_load_mxfp4x4x8_full(const uint8_t * restrict ptr) {
    const HVX_Vector * restrict vptr = (const HVX_Vector *) ptr;

    HVX_Vector v0_1 = vptr[0]; // first 256 elements (128 bytes)
    HVX_Vector v2_3 = vptr[1]; // ...
    HVX_Vector v4_5 = vptr[2]; // ...
    HVX_Vector v6_7 = vptr[3]; // ...

    const HVX_Vector mask_h4 = Q6_Vb_vsplat_R(0x0F);
    const HVX_Vector lut = *(const HVX_Vector *) kvalues_mxfp4_lut;

    HVX_Vector v0 = Q6_V_vand_VV(v0_1, mask_h4);  // & 0x0F
    HVX_Vector v1 = Q6_Vub_vlsr_VubR(v0_1, 4);    // >> 4
    HVX_Vector v2 = Q6_V_vand_VV(v2_3, mask_h4);  // & 0x0F
    HVX_Vector v3 = Q6_Vub_vlsr_VubR(v2_3, 4);    // >> 4
    HVX_Vector v4 = Q6_V_vand_VV(v4_5, mask_h4);  // & 0x0F
    HVX_Vector v5 = Q6_Vub_vlsr_VubR(v4_5, 4);    // >> 4
    HVX_Vector v6 = Q6_V_vand_VV(v6_7, mask_h4);  // & 0x0F
    HVX_Vector v7 = Q6_Vub_vlsr_VubR(v6_7, 4);    // >> 4

    // Map 4-bit codes -> signed int8 values via the LUT (table index 0)
    v0 = Q6_Vb_vlut32_VbVbI(v0, lut, 0);
    v1 = Q6_Vb_vlut32_VbVbI(v1, lut, 0);
    v2 = Q6_Vb_vlut32_VbVbI(v2, lut, 0);
    v3 = Q6_Vb_vlut32_VbVbI(v3, lut, 0);
    v4 = Q6_Vb_vlut32_VbVbI(v4, lut, 0);
    v5 = Q6_Vb_vlut32_VbVbI(v5, lut, 0);
    v6 = Q6_Vb_vlut32_VbVbI(v6, lut, 0);
    v7 = Q6_Vb_vlut32_VbVbI(v7, lut, 0);

    HVX_Vector_x8 r = { v0, v1, v2, v3, v4, v5, v6, v7 };
    return r;
}
|
|
180
|
+
|
|
181
|
+
// Unpack up to 8 vectors of mxfp4 quants for a row tail of n elements.
// Mirrors hvx_vec_load_q4x4x8_partial, but maps codes through
// kvalues_mxfp4_lut instead of subtracting 8. The trailing partial chunk
// is stored nibble-interleaved and re-zipped with vshuff.
// NOTE(review): r.v[] entries beyond the last loaded chunk are left
// uninitialized -- callers mask out the corresponding lanes afterwards.
static inline HVX_Vector_x8 hvx_vec_load_mxfp4x4x8_partial(const uint8_t * restrict ptr, uint32_t n) {
    const HVX_Vector * restrict vptr = (const HVX_Vector *) ptr;

    const uint32_t qk = QK_Q4_0x4x2; // 256
    const uint32_t nb = n / qk;      // full 256-element chunks
    const uint32_t nloe = n % qk;    // leftover elements in the last chunk

    const HVX_Vector mask_h4 = Q6_Vb_vsplat_R(0x0F);
    const HVX_Vector lut = *(const HVX_Vector *) kvalues_mxfp4_lut;

    HVX_Vector_x8 r;
    uint32_t i = 0;

#pragma unroll(2)
    for (i=0; i < nb; i++) {
        HVX_Vector v = vptr[i];                    // 256 elements (128 bytes)
        HVX_Vector v0 = Q6_V_vand_VV(v, mask_h4);  // & 0x0F : first 128 elements
        HVX_Vector v1 = Q6_Vub_vlsr_VubR(v, 4);    // >> 4   : second 128 elements
        r.v[i*2+0] = Q6_Vb_vlut32_VbVbI(v0, lut, 0);
        r.v[i*2+1] = Q6_Vb_vlut32_VbVbI(v1, lut, 0);
    }

    if (nloe) {
        HVX_Vector v = vptr[i];                              // 256 elements (128 bytes)
        HVX_Vector v0 = Q6_V_vand_VV(v, mask_h4);            // & 0x0F : even 128 elements
        HVX_Vector v1 = Q6_Vub_vlsr_VubR(v, 4);              // >> 4   : odd 128 elements
        HVX_VectorPair v0_1_p = Q6_W_vshuff_VVR(v1, v0, -1); // zip even:odd:...
        r.v[i*2+0] = Q6_Vb_vlut32_VbVbI(Q6_V_lo_W(v0_1_p), lut, 0);
        r.v[i*2+1] = Q6_Vb_vlut32_VbVbI(Q6_V_hi_W(v0_1_p), lut, 0);
    }

    return r;
}
|
|
214
|
+
|
|
215
|
+
static inline HVX_Vector_x8 hvx_vec_load_q8x4x8_full(const uint8_t * restrict ptr) {
|
|
216
|
+
const HVX_Vector * restrict vptr = (const HVX_Vector *) ptr;
|
|
217
|
+
|
|
218
|
+
HVX_Vector v0 = vptr[0]; // first 128 vals
|
|
219
|
+
HVX_Vector v1 = vptr[1]; // ...
|
|
220
|
+
HVX_Vector v2 = vptr[2]; // ...
|
|
221
|
+
HVX_Vector v3 = vptr[3]; // ...
|
|
222
|
+
HVX_Vector v4 = vptr[4]; // ...
|
|
223
|
+
HVX_Vector v5 = vptr[5]; // ...
|
|
224
|
+
HVX_Vector v6 = vptr[6]; // ...
|
|
225
|
+
HVX_Vector v7 = vptr[7]; // ...
|
|
226
|
+
|
|
227
|
+
HVX_Vector_x8 r = { v0, v1, v2, v3, v4, v5, v6, v7 };
|
|
228
|
+
return r;
|
|
229
|
+
}
|
|
230
|
+
|
|
231
|
+
// Partial-row q8 load: q8 quants need no unpacking, so all 8 vectors are
// loaded regardless of nloe. NOTE(review): this reads past the last valid
// element up to the 8-vector boundary -- presumably safe because rows are
// allocated with q8x4x2_row_size's 128-byte round-up and callers mask the
// unused lanes; confirm the allocation guarantees cover the full 8 vectors.
static inline HVX_Vector_x8 hvx_vec_load_q8x4x8_partial(const uint8_t * restrict ptr, uint32_t nloe) {
    return hvx_vec_load_q8x4x8_full(ptr);
}
|
|
234
|
+
|
|
235
|
+
// Reduce multiply 1024 x 1024 int8 elements (32x q4/8 blocks in 8x HVX vectors).
// Accumulate each block into a single int32 value.
// Return a single HVX vector with 32x int32 accumulators.
// This version is parameterized to support less than 1024 elements.
// if() checks are optimized out at compile time -- make sure to pass N as a constexpr.

static inline HVX_Vector hvx_vec_rmpy_x8_n(HVX_Vector_x8 x, HVX_Vector_x8 y, unsigned int n) {
    HVX_Vector r0 = Q6_V_vzero();
    HVX_Vector r1 = Q6_V_vzero();
    HVX_Vector r2 = Q6_V_vzero();
    HVX_Vector r3 = Q6_V_vzero();
    HVX_Vector r4 = Q6_V_vzero();
    HVX_Vector r5 = Q6_V_vzero();
    HVX_Vector r6 = Q6_V_vzero();
    HVX_Vector r7 = Q6_V_vzero();

    // NOTE: p0..p3 are only written/read under matching n-guards below, so
    // they are never used uninitialized for any fixed n.
    HVX_VectorPair p3;
    HVX_VectorPair p2;
    HVX_VectorPair p1;
    HVX_VectorPair p0;

    // Stage 1: per-vector reduce-multiply -- each 32-bit lane accumulates
    // 4 adjacent int8 products (vrmpy).
    if (n >= 128) { r0 = Q6_Vw_vrmpy_VbVb(x.v[0], y.v[0]); }
    if (n >= 256) { r1 = Q6_Vw_vrmpy_VbVb(x.v[1], y.v[1]); }
    if (n >= 384) { r2 = Q6_Vw_vrmpy_VbVb(x.v[2], y.v[2]); }
    if (n >= 512) { r3 = Q6_Vw_vrmpy_VbVb(x.v[3], y.v[3]); }
    if (n >= 640) { r4 = Q6_Vw_vrmpy_VbVb(x.v[4], y.v[4]); }
    if (n >= 768) { r5 = Q6_Vw_vrmpy_VbVb(x.v[5], y.v[5]); }
    if (n >= 896) { r6 = Q6_Vw_vrmpy_VbVb(x.v[6], y.v[6]); }
    if (n >= 1024) { r7 = Q6_Vw_vrmpy_VbVb(x.v[7], y.v[7]); }

    // Stages 2-4: pairwise deal+add tree that folds the partial sums down
    // until each 32-element block's total sits in one int32 lane of r0.
    if (n >= 128) { p0 = Q6_W_vdeal_VVR(r1, r0, -4); }
    if (n >= 384) { p1 = Q6_W_vdeal_VVR(r3, r2, -4); }
    if (n >= 640) { p2 = Q6_W_vdeal_VVR(r5, r4, -4); }
    if (n >= 896) { p3 = Q6_W_vdeal_VVR(r7, r6, -4); }

    if (n >= 128) { r0 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p0), Q6_V_hi_W(p0)); }
    if (n >= 384) { r1 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p1), Q6_V_hi_W(p1)); }
    if (n >= 640) { r2 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p2), Q6_V_hi_W(p2)); }
    if (n >= 896) { r3 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p3), Q6_V_hi_W(p3)); }

    if (n >= 128) { p0 = Q6_W_vdeal_VVR(r1, r0, -4); }
    if (n >= 640) { p1 = Q6_W_vdeal_VVR(r3, r2, -4); }

    if (n >= 128) { r0 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p0), Q6_V_hi_W(p0)); }
    if (n >= 640) { r1 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p1), Q6_V_hi_W(p1)); }

    if (n >= 128) { p0 = Q6_W_vdeal_VVR(r1, r0, -4); }
    if (n >= 128) { r0 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p0), Q6_V_hi_W(p0)); }

    return r0;
}
|
|
286
|
+
|
|
287
|
+
// Fixed 1024-element version of hvx_vec_rmpy_x8_n: reduce-multiply all
// 8 vector pairs and fold into 32x int32 block accumulators.
static inline HVX_Vector hvx_vec_rmpy_x8_full(HVX_Vector_x8 x, HVX_Vector_x8 y) {
    // Per-vector reduce-multiply: each 32-bit lane sums 4 int8 products.
    HVX_Vector r0 = Q6_Vw_vrmpy_VbVb(x.v[0], y.v[0]);
    HVX_Vector r1 = Q6_Vw_vrmpy_VbVb(x.v[1], y.v[1]);
    HVX_Vector r2 = Q6_Vw_vrmpy_VbVb(x.v[2], y.v[2]);
    HVX_Vector r3 = Q6_Vw_vrmpy_VbVb(x.v[3], y.v[3]);
    HVX_Vector r4 = Q6_Vw_vrmpy_VbVb(x.v[4], y.v[4]);
    HVX_Vector r5 = Q6_Vw_vrmpy_VbVb(x.v[5], y.v[5]);
    HVX_Vector r6 = Q6_Vw_vrmpy_VbVb(x.v[6], y.v[6]);
    HVX_Vector r7 = Q6_Vw_vrmpy_VbVb(x.v[7], y.v[7]);

    // Pairwise deal+add tree: 8 -> 4 -> 2 -> 1 vectors, leaving one int32
    // accumulator per 32-element block in the final vector.
    HVX_VectorPair p0 = Q6_W_vdeal_VVR(r1, r0, -4);
    HVX_VectorPair p1 = Q6_W_vdeal_VVR(r3, r2, -4);
    HVX_VectorPair p2 = Q6_W_vdeal_VVR(r5, r4, -4);
    HVX_VectorPair p3 = Q6_W_vdeal_VVR(r7, r6, -4);

    r0 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p0), Q6_V_hi_W(p0));
    r1 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p1), Q6_V_hi_W(p1));
    r2 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p2), Q6_V_hi_W(p2));
    r3 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p3), Q6_V_hi_W(p3));

    p0 = Q6_W_vdeal_VVR(r1, r0, -4);
    p1 = Q6_W_vdeal_VVR(r3, r2, -4);

    r0 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p0), Q6_V_hi_W(p0));
    r1 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p1), Q6_V_hi_W(p1));

    p0 = Q6_W_vdeal_VVR(r1, r0, -4);
    r0 = Q6_Vw_vadd_VwVw(Q6_V_lo_W(p0), Q6_V_hi_W(p0));

    return r0;
}
|
|
318
|
+
|
|
319
|
+
// Reduce-multiply for a leftover chunk of n (< 1024) elements.
// Dispatches to the full 8-vector kernel when at least half the vectors
// are populated, otherwise runs the parameterized kernel over the first
// 4 vectors only (512 is a compile-time constant, so the if() ladder in
// hvx_vec_rmpy_x8_n folds away). Callers mask out accumulator lanes
// beyond n, so lanes above the processed range may hold garbage.
static inline HVX_Vector hvx_vec_rmpy_x8_partial(HVX_Vector_x8 x, HVX_Vector_x8 y, unsigned int n) {
    if (n >= 512) {
        return hvx_vec_rmpy_x8_full(x, y);
    }

    // NOTE(review): this used to tail-call itself with n=512, which just
    // re-dispatched to the full version -- doing all 8 vrmpys and touching
    // the (potentially uninitialized) upper vectors of a partial load.
    // Valid lanes are unchanged by processing only vectors 0..3 here.
    return hvx_vec_rmpy_x8_n(x, y, 512);
}
|
|
325
|
+
|
|
326
|
+
// Dot product of one q4x4x2 row (vx0) with one q8x4x2 row (vy0) of n
// elements; writes a single float to s0. Both rows use the flat layout:
// all quants first, then all fp16 block scales. Both pointers must be
// 128-byte (HVX vector) aligned.
static void vec_dot_q4x4x2_q8x4x2_1x1(const int n, float * restrict s0, const void * restrict vx0, const void * restrict vy0) {
    assert(n % 32 == 0); // min sub-block size
    assert((unsigned long) vx0 % 128 == 0);
    assert((unsigned long) vy0 % 128 == 0);

    const uint32_t qk = QK_Q4_0x4x2 * 4; // elements consumed per loop iteration (8 HVX vectors)

    const uint32_t x_dblk_size = 8 * 4 * 2; // 32x __fp16
    const uint32_t x_qblk_size = qk / 2;    // int4 (2 quants per byte)
    const uint32_t x_qrow_size = n / 2;     // int4 (not padded)

    const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16
    const uint32_t y_qblk_size = qk;        // int8
    const uint32_t y_qrow_size = n;         // int8 (not padded)

    const uint8_t * restrict r0_x_q = ((const uint8_t *) vx0 + 0);           // quants first
    const uint8_t * restrict r0_x_d = ((const uint8_t *) vx0 + x_qrow_size); // then scales

    const uint8_t * restrict y_q = ((const uint8_t *) vy0 + 0);           // quants first
    const uint8_t * restrict y_d = ((const uint8_t *) vy0 + y_qrow_size); // then scales

    // Row sum (sf)
    HVX_Vector r0_sum = Q6_V_vzero();

    // Per iteration:
    // Multiply and accumulate into int32.
    // Compute combined scale (fp32).
    // Apply scale to acc and accumulate into the row sum (qf32).

    const uint32_t nb = n / qk;   // num full blocks
    const uint32_t nloe = n % qk; // num leftover elements

    uint32_t i = 0;
    for (; i < nb; i++) {
        HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8_full(y_q + i * y_qblk_size);
        HVX_Vector_x8 r0_q = hvx_vec_load_q4x4x8_full(r0_x_q + i * x_qblk_size);

        // 32x int32 block accumulators, converted to fp32
        HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q));

        // Load 32 fp16 scales per side (unaligned); vshuff widens the
        // halfword layout to match the fp16->qf32 multiply below.
        HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size));
        HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size));

        // Combined per-block scale: d_x * d_y in fp32
        HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d)));

        HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd);

        r0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_fa, r0_sum));
    }

    // Process leftovers
    if (nloe) {
        HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8_partial(y_q + i * y_qblk_size, nloe);
        HVX_Vector_x8 r0_q = hvx_vec_load_q4x4x8_partial(r0_x_q + i * x_qblk_size, nloe);

        HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r0_q, vy_q, nloe));

        HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size));
        HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size));

        HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d)));

        // Zero out unused elements: keep the first nloe/8 bytes, i.e. the
        // nloe/32 int32 accumulator lanes that correspond to real data.
        HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8);
        r0_dd = Q6_V_vand_QV(bmask, r0_dd);
        r0_ia = Q6_V_vand_QV(bmask, r0_ia);

        HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd);

        r0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_fa, r0_sum));
    }

    // Horizontal reduce of the 32 fp32 lanes, then store 4 bytes (1 float)
    r0_sum = hvx_vec_reduce_sum_f32(r0_sum);

    hvx_vec_store_u(s0, 4, r0_sum);
}
|
|
400
|
+
|
|
401
|
+
// Dot product of two q4x4x2 rows (vx0, vx1) against one shared q8x4x2
// column (vy0); writes two floats to s0. The src1 quants and scales are
// loaded once per iteration and reused for both rows. All pointers must
// be 128-byte aligned; rows use the flat quants-then-scales layout.
static void vec_dot_q4x4x2_q8x4x2_2x1(const int n, float * restrict s0,
                                      const void * restrict vx0, const void * restrict vx1,
                                      const void * restrict vy0) {
    assert(n % 32 == 0); // min sub-block size
    assert((unsigned long) vx0 % 128 == 0);
    assert((unsigned long) vx1 % 128 == 0);
    assert((unsigned long) vy0 % 128 == 0);

    const uint32_t qk = QK_Q4_0x4x2 * 4; // elements consumed per loop iteration

    const uint32_t x_dblk_size = 8 * 4 * 2; // 32x __fp16
    const uint32_t x_qblk_size = qk / 2;    // int4
    const uint32_t x_qrow_size = n / 2;     // int4 (not padded)

    const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16
    const uint32_t y_qblk_size = qk;        // int8
    const uint32_t y_qrow_size = n;         // int8 (not padded)

    const uint8_t * restrict r0_x_q = ((const uint8_t *) vx0) + 0;           // quants first
    const uint8_t * restrict r0_x_d = ((const uint8_t *) vx0) + x_qrow_size; // then scales
    const uint8_t * restrict r1_x_q = ((const uint8_t *) vx1) + 0;           // quants first
    const uint8_t * restrict r1_x_d = ((const uint8_t *) vx1) + x_qrow_size; // then scales

    const uint8_t * restrict y_q = ((const uint8_t *) vy0 + 0);           // quants first
    const uint8_t * restrict y_d = ((const uint8_t *) vy0 + y_qrow_size); // then scales

    // Row sums (sf), one per src0 row
    HVX_Vector r0_sum = Q6_V_vzero();
    HVX_Vector r1_sum = Q6_V_vzero();

    // Per iteration:
    // Multiply and accumulate into int32.
    // Compute combined scale (fp32).
    // Apply scale to acc and accumulate into the row sum (qf32).

    const uint32_t nb = n / qk;   // num full blocks
    const uint32_t nloe = n % qk; // num leftover elements

    uint32_t i = 0;
    for (; i < nb; i++) {
        // src1 quants loaded once, shared by both rows
        HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8_full(y_q + i * y_qblk_size);
        HVX_Vector_x8 r0_q = hvx_vec_load_q4x4x8_full(r0_x_q + i * x_qblk_size);
        HVX_Vector_x8 r1_q = hvx_vec_load_q4x4x8_full(r1_x_q + i * x_qblk_size);

        HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q));
        HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy_q));

        HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size));
        HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size));
        HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size));

        // Combined per-block scales (fp32)
        HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d)));
        HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy_d)));

        HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd);
        HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd);

        r0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_fa, r0_sum));
        r1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_fa, r1_sum));
    }

    // Process leftovers
    if (nloe) {
        HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8_partial(y_q + i * y_qblk_size, nloe);
        HVX_Vector_x8 r0_q = hvx_vec_load_q4x4x8_partial(r0_x_q + i * x_qblk_size, nloe);
        HVX_Vector_x8 r1_q = hvx_vec_load_q4x4x8_partial(r1_x_q + i * x_qblk_size, nloe);

        HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r0_q, vy_q, nloe));
        HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r1_q, vy_q, nloe));

        HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size));
        HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size));
        HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size));

        HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d)));
        HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy_d)));

        // Zero out unused elements: keep only the nloe/32 valid int32 lanes
        HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8);
        r0_dd = Q6_V_vand_QV(bmask, r0_dd);
        r1_dd = Q6_V_vand_QV(bmask, r1_dd);
        r0_ia = Q6_V_vand_QV(bmask, r0_ia);
        r1_ia = Q6_V_vand_QV(bmask, r1_ia);

        HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd);
        HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd);

        r0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_fa, r0_sum));
        r1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_fa, r1_sum));
    }

    // Reduce both row sums and store 2 floats (8 bytes) at s0
    HVX_Vector rsum = hvx_vec_reduce_sum_f32x2(r0_sum, r1_sum);
    hvx_vec_store_u(s0, 8, rsum);
}
|
|
494
|
+
|
|
495
|
+
// 2x2 tile dot product: two q4x4x2 rows (vx0, vx1) against two q8x4x2
// columns (vy0, vy1). Writes column-0 results (row0, row1) to s0 and
// column-1 results to s1, 2 floats each. Quants and scales of each operand
// are loaded once per iteration and reused across the tile. All pointers
// must be 128-byte aligned; rows use the flat quants-then-scales layout.
static void vec_dot_q4x4x2_q8x4x2_2x2(const int n, float * restrict s0, float * restrict s1,
                                      const void * restrict vx0, const void * restrict vx1,
                                      const void * restrict vy0, const void * restrict vy1) {
    assert(n % 32 == 0); // min sub-block size
    assert((unsigned long) vx0 % 128 == 0);
    assert((unsigned long) vx1 % 128 == 0);
    assert((unsigned long) vy0 % 128 == 0);
    assert((unsigned long) vy1 % 128 == 0);

    const uint32_t qk = QK_Q4_0x4x2 * 4; // elements consumed per loop iteration

    const uint32_t x_dblk_size = 8 * 4 * 2; // 32x __fp16
    const uint32_t x_qblk_size = qk / 2;    // int4
    const uint32_t x_qrow_size = n / 2;     // int4 (not padded)

    const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16
    const uint32_t y_qblk_size = qk;        // int8
    const uint32_t y_qrow_size = n;         // int8 (not padded)

    const uint8_t * restrict r0_x_q = ((const uint8_t *) vx0) + 0;           // quants first
    const uint8_t * restrict r0_x_d = ((const uint8_t *) vx0) + x_qrow_size; // then scales
    const uint8_t * restrict r1_x_q = ((const uint8_t *) vx1) + 0;           // quants first
    const uint8_t * restrict r1_x_d = ((const uint8_t *) vx1) + x_qrow_size; // then scales

    const uint8_t * restrict y0_q = ((const uint8_t *) vy0) + 0;           // quants first
    const uint8_t * restrict y0_d = ((const uint8_t *) vy0) + y_qrow_size; // then scales
    const uint8_t * restrict y1_q = ((const uint8_t *) vy1) + 0;           // quants first
    const uint8_t * restrict y1_d = ((const uint8_t *) vy1) + y_qrow_size; // then scales

    // Row sums (sf) - 4 accumulators for 2x2 tile
    HVX_Vector r0_c0_sum = Q6_V_vzero();
    HVX_Vector r0_c1_sum = Q6_V_vzero();
    HVX_Vector r1_c0_sum = Q6_V_vzero();
    HVX_Vector r1_c1_sum = Q6_V_vzero();

    const uint32_t nb = n / qk;   // num full blocks
    const uint32_t nloe = n % qk; // num leftover elements

    uint32_t i = 0;
    for (; i < nb; i++) {
        // Load src1 columns (reused across both src0 rows)
        HVX_Vector_x8 vy0_q = hvx_vec_load_q8x4x8_full(y0_q + i * y_qblk_size);
        HVX_Vector_x8 vy1_q = hvx_vec_load_q8x4x8_full(y1_q + i * y_qblk_size);

        // Load src0 rows (reused across both src1 columns)
        HVX_Vector_x8 r0_q = hvx_vec_load_q4x4x8_full(r0_x_q + i * x_qblk_size);
        HVX_Vector_x8 r1_q = hvx_vec_load_q4x4x8_full(r1_x_q + i * x_qblk_size);

        // Compute 4 dot products: r0xc0, r0xc1, r1xc0, r1xc1
        HVX_Vector r0_c0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy0_q));
        HVX_Vector r0_c1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy1_q));
        HVX_Vector r1_c0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy0_q));
        HVX_Vector r1_c1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy1_q));

        // Load scales (unaligned; vshuff matches the fp16->qf32 multiply layout)
        HVX_Vector vy0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y0_d + i * y_dblk_size));
        HVX_Vector vy1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y1_d + i * y_dblk_size));
        HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size));
        HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size));

        // Compute combined scales
        HVX_Vector r0_c0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy0_d)));
        HVX_Vector r0_c1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy1_d)));
        HVX_Vector r1_c0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy0_d)));
        HVX_Vector r1_c1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy1_d)));

        // Apply scales and accumulate
        HVX_Vector r0_c0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_c0_ia, r0_c0_dd);
        HVX_Vector r0_c1_fa = Q6_Vqf32_vmpy_VsfVsf(r0_c1_ia, r0_c1_dd);
        HVX_Vector r1_c0_fa = Q6_Vqf32_vmpy_VsfVsf(r1_c0_ia, r1_c0_dd);
        HVX_Vector r1_c1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_c1_ia, r1_c1_dd);

        r0_c0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_c0_fa, r0_c0_sum));
        r0_c1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_c1_fa, r0_c1_sum));
        r1_c0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_c0_fa, r1_c0_sum));
        r1_c1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_c1_fa, r1_c1_sum));
    }

    // Process leftovers
    if (nloe) {
        HVX_Vector_x8 vy0_q = hvx_vec_load_q8x4x8_partial(y0_q + i * y_qblk_size, nloe);
        HVX_Vector_x8 vy1_q = hvx_vec_load_q8x4x8_partial(y1_q + i * y_qblk_size, nloe);
        HVX_Vector_x8 r0_q = hvx_vec_load_q4x4x8_partial(r0_x_q + i * x_qblk_size, nloe);
        HVX_Vector_x8 r1_q = hvx_vec_load_q4x4x8_partial(r1_x_q + i * x_qblk_size, nloe);

        HVX_Vector r0_c0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r0_q, vy0_q, nloe));
        HVX_Vector r0_c1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r0_q, vy1_q, nloe));
        HVX_Vector r1_c0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r1_q, vy0_q, nloe));
        HVX_Vector r1_c1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r1_q, vy1_q, nloe));

        HVX_Vector vy0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y0_d + i * y_dblk_size));
        HVX_Vector vy1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y1_d + i * y_dblk_size));
        HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size));
        HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size));

        HVX_Vector r0_c0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy0_d)));
        HVX_Vector r0_c1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy1_d)));
        HVX_Vector r1_c0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy0_d)));
        HVX_Vector r1_c1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy1_d)));

        // Zero out unused scales/accumulators: keep only the nloe/32 valid int32 lanes
        HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8);
        r0_c0_dd = Q6_V_vand_QV(bmask, r0_c0_dd);
        r0_c1_dd = Q6_V_vand_QV(bmask, r0_c1_dd);
        r1_c0_dd = Q6_V_vand_QV(bmask, r1_c0_dd);
        r1_c1_dd = Q6_V_vand_QV(bmask, r1_c1_dd);
        r0_c0_ia = Q6_V_vand_QV(bmask, r0_c0_ia);
        r0_c1_ia = Q6_V_vand_QV(bmask, r0_c1_ia);
        r1_c0_ia = Q6_V_vand_QV(bmask, r1_c0_ia);
        r1_c1_ia = Q6_V_vand_QV(bmask, r1_c1_ia);

        HVX_Vector r0_c0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_c0_ia, r0_c0_dd);
        HVX_Vector r0_c1_fa = Q6_Vqf32_vmpy_VsfVsf(r0_c1_ia, r0_c1_dd);
        HVX_Vector r1_c0_fa = Q6_Vqf32_vmpy_VsfVsf(r1_c0_ia, r1_c0_dd);
        HVX_Vector r1_c1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_c1_ia, r1_c1_dd);

        r0_c0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_c0_fa, r0_c0_sum));
        r0_c1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_c1_fa, r0_c1_sum));
        r1_c0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_c0_fa, r1_c0_sum));
        r1_c1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_c1_fa, r1_c1_sum));
    }

    // Reduce and store results
    HVX_Vector r0_r1_c0_sum = hvx_vec_reduce_sum_f32x2(r0_c0_sum, r1_c0_sum);
    HVX_Vector r0_r1_c1_sum = hvx_vec_reduce_sum_f32x2(r0_c1_sum, r1_c1_sum);

    hvx_vec_store_u(s0, 8, r0_r1_c0_sum); // row0,col0 row1,col0
    hvx_vec_store_u(s1, 8, r0_r1_c1_sum); // row0,col1 row1,col1
}
|
|
624
|
+
|
|
625
|
+
static void vec_dot_q8x4x2_q8x4x2_1x1(const int n, float * restrict s0, const void * restrict vx0, const void * restrict vy0) {
|
|
626
|
+
assert(n % 32 == 0); // min sub-block size
|
|
627
|
+
assert((unsigned long) vx0 % 128 == 0);
|
|
628
|
+
assert((unsigned long) vy0 % 128 == 0);
|
|
629
|
+
|
|
630
|
+
const uint32_t qk = QK_Q4_0x4x2 * 4;
|
|
631
|
+
|
|
632
|
+
const uint32_t x_dblk_size = 8 * 4 * 2; // 32x __fp16
|
|
633
|
+
const uint32_t x_qblk_size = qk; // int8
|
|
634
|
+
const uint32_t x_qrow_size = n; // int8 (not padded)
|
|
635
|
+
|
|
636
|
+
const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16
|
|
637
|
+
const uint32_t y_qblk_size = qk; // int8
|
|
638
|
+
const uint32_t y_qrow_size = n; // int8 (not padded)
|
|
639
|
+
|
|
640
|
+
const uint8_t * restrict r0_x_q = ((const uint8_t *) vx0 + 0); // quants first
|
|
641
|
+
const uint8_t * restrict r0_x_d = ((const uint8_t *) vx0 + x_qrow_size); // then scales
|
|
642
|
+
|
|
643
|
+
const uint8_t * restrict y_q = ((const uint8_t *) vy0 + 0); // quants first
|
|
644
|
+
const uint8_t * restrict y_d = ((const uint8_t *) vy0 + y_qrow_size); // then scales
|
|
645
|
+
|
|
646
|
+
// Row sum (sf)
|
|
647
|
+
HVX_Vector r0_sum = Q6_V_vzero();
|
|
648
|
+
|
|
649
|
+
// Multiply and accumulate into int32.
|
|
650
|
+
// Compute combined scale (fp32).
|
|
651
|
+
// Apply scale to acc and accumulate into the row sum (qf32).
|
|
652
|
+
|
|
653
|
+
const uint32_t nb = n / qk; // num full blocks
|
|
654
|
+
int32_t nloe = n % qk; // num leftover elemements (must be signed)
|
|
655
|
+
|
|
656
|
+
uint32_t i = 0;
|
|
657
|
+
for (; i < nb; i++) {
|
|
658
|
+
HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8_full(y_q + i * y_qblk_size);
|
|
659
|
+
HVX_Vector_x8 r0_q = hvx_vec_load_q8x4x8_full(r0_x_q + i * x_qblk_size);
|
|
660
|
+
|
|
661
|
+
HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q));
|
|
662
|
+
|
|
663
|
+
HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size));
|
|
664
|
+
HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size));
|
|
665
|
+
|
|
666
|
+
HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d)));
|
|
667
|
+
|
|
668
|
+
HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd);
|
|
669
|
+
|
|
670
|
+
r0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_fa, r0_sum));
|
|
671
|
+
}
|
|
672
|
+
|
|
673
|
+
// Process leftovers
|
|
674
|
+
if (nloe) {
|
|
675
|
+
HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8_partial(y_q + i * y_qblk_size, nloe);
|
|
676
|
+
HVX_Vector_x8 r0_q = hvx_vec_load_q8x4x8_partial(r0_x_q + i * x_qblk_size, nloe);
|
|
677
|
+
|
|
678
|
+
HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r0_q, vy_q, nloe));
|
|
679
|
+
|
|
680
|
+
HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size));
|
|
681
|
+
HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size));
|
|
682
|
+
|
|
683
|
+
HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d)));
|
|
684
|
+
|
|
685
|
+
// Zero out unused elements
|
|
686
|
+
HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8);
|
|
687
|
+
r0_dd = Q6_V_vand_QV(bmask, r0_dd);
|
|
688
|
+
r0_ia = Q6_V_vand_QV(bmask, r0_ia);
|
|
689
|
+
|
|
690
|
+
HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd);
|
|
691
|
+
|
|
692
|
+
r0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_fa, r0_sum));
|
|
693
|
+
}
|
|
694
|
+
|
|
695
|
+
r0_sum = hvx_vec_reduce_sum_f32(r0_sum);
|
|
696
|
+
|
|
697
|
+
hvx_vec_store_u(s0, 4, r0_sum);
|
|
698
|
+
}
|
|
699
|
+
|
|
700
|
+
static void vec_dot_q8x4x2_q8x4x2_2x1(const int n, float * restrict s0,
|
|
701
|
+
const void * restrict vx0, const void * restrict vx1,
|
|
702
|
+
const void * restrict vy0) {
|
|
703
|
+
assert(n % 32 == 0); // min sub-block size
|
|
704
|
+
assert((unsigned long) vx0 % 128 == 0);
|
|
705
|
+
assert((unsigned long) vx1 % 128 == 0);
|
|
706
|
+
assert((unsigned long) vy0 % 128 == 0);
|
|
707
|
+
|
|
708
|
+
const uint32_t qk = QK_Q4_0x4x2 * 4;
|
|
709
|
+
|
|
710
|
+
const uint32_t x_dblk_size = 8 * 4 * 2; // 32x __fp16
|
|
711
|
+
const uint32_t x_qblk_size = qk; // int8
|
|
712
|
+
const uint32_t x_qrow_size = n; // int8 (not padded)
|
|
713
|
+
|
|
714
|
+
const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16
|
|
715
|
+
const uint32_t y_qblk_size = qk; // int8
|
|
716
|
+
const uint32_t y_qrow_size = n; // int8 (not padded)
|
|
717
|
+
|
|
718
|
+
const uint8_t * restrict r0_x_q = ((const uint8_t *) vx0) + 0; // quants first
|
|
719
|
+
const uint8_t * restrict r0_x_d = ((const uint8_t *) vx0) + x_qrow_size; // then scales
|
|
720
|
+
const uint8_t * restrict r1_x_q = ((const uint8_t *) vx1) + 0; // quants first
|
|
721
|
+
const uint8_t * restrict r1_x_d = ((const uint8_t *) vx1) + x_qrow_size; // then scales
|
|
722
|
+
|
|
723
|
+
const uint8_t * restrict y_q = ((const uint8_t *) vy0 + 0); // quants first
|
|
724
|
+
const uint8_t * restrict y_d = ((const uint8_t *) vy0 + y_qrow_size); // then scales
|
|
725
|
+
|
|
726
|
+
// Row sum (qf32)
|
|
727
|
+
HVX_Vector r0_sum = Q6_V_vzero();
|
|
728
|
+
HVX_Vector r1_sum = Q6_V_vzero();
|
|
729
|
+
|
|
730
|
+
// Multiply and accumulate into int32.
|
|
731
|
+
// Compute combined scale (fp32).
|
|
732
|
+
// Apply scale to acc and accumulate into the row sum (qf32).
|
|
733
|
+
|
|
734
|
+
const uint32_t nb = n / qk; // num full blocks
|
|
735
|
+
int32_t nloe = n % qk; // num leftover elemements (must be signed)
|
|
736
|
+
|
|
737
|
+
uint32_t i = 0;
|
|
738
|
+
for (; i < nb; i++) {
|
|
739
|
+
HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8_full(y_q + i * y_qblk_size);
|
|
740
|
+
HVX_Vector_x8 r0_q = hvx_vec_load_q8x4x8_full(r0_x_q + i * x_qblk_size);
|
|
741
|
+
HVX_Vector_x8 r1_q = hvx_vec_load_q8x4x8_full(r1_x_q + i * x_qblk_size);
|
|
742
|
+
|
|
743
|
+
HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q));
|
|
744
|
+
HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy_q));
|
|
745
|
+
|
|
746
|
+
HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size));
|
|
747
|
+
HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size));
|
|
748
|
+
HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size));
|
|
749
|
+
|
|
750
|
+
HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d)));
|
|
751
|
+
HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy_d)));
|
|
752
|
+
|
|
753
|
+
HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd);
|
|
754
|
+
HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd);
|
|
755
|
+
|
|
756
|
+
r0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_fa, r0_sum));
|
|
757
|
+
r1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_fa, r1_sum));
|
|
758
|
+
}
|
|
759
|
+
|
|
760
|
+
// Process leftovers
|
|
761
|
+
if (nloe) {
|
|
762
|
+
HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8_partial(y_q + i * y_qblk_size, nloe);
|
|
763
|
+
HVX_Vector_x8 r0_q = hvx_vec_load_q8x4x8_partial(r0_x_q + i * x_qblk_size, nloe);
|
|
764
|
+
HVX_Vector_x8 r1_q = hvx_vec_load_q8x4x8_partial(r1_x_q + i * x_qblk_size, nloe);
|
|
765
|
+
|
|
766
|
+
HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r0_q, vy_q, nloe));
|
|
767
|
+
HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r1_q, vy_q, nloe));
|
|
768
|
+
|
|
769
|
+
HVX_Vector vy_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y_d + i * y_dblk_size));
|
|
770
|
+
HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size));
|
|
771
|
+
HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size));
|
|
772
|
+
|
|
773
|
+
HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy_d)));
|
|
774
|
+
HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy_d)));
|
|
775
|
+
|
|
776
|
+
// Zero out unused elements
|
|
777
|
+
HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8);
|
|
778
|
+
r0_dd = Q6_V_vand_QV(bmask, r0_dd);
|
|
779
|
+
r1_dd = Q6_V_vand_QV(bmask, r1_dd);
|
|
780
|
+
r0_ia = Q6_V_vand_QV(bmask, r0_ia);
|
|
781
|
+
r1_ia = Q6_V_vand_QV(bmask, r1_ia);
|
|
782
|
+
|
|
783
|
+
HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd);
|
|
784
|
+
HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd);
|
|
785
|
+
|
|
786
|
+
r0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_fa, r0_sum));
|
|
787
|
+
r1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_fa, r1_sum));
|
|
788
|
+
}
|
|
789
|
+
|
|
790
|
+
HVX_Vector rsum = hvx_vec_reduce_sum_f32x2(r0_sum, r1_sum);
|
|
791
|
+
hvx_vec_store_u(s0, 8, rsum);
|
|
792
|
+
}
|
|
793
|
+
|
|
794
|
+
// Dot products of a 2x2 tile: two q8x4x2 rows (vx0, vx1) against two q8x4x2
// columns (vy0, vy1). Quant and scale loads are shared across the tile, so the
// four products cost roughly two 1x1 kernels in memory traffic.
// Operand memory layout: n int8 quants first, then the per-block __fp16 scales.
// Results: s0 <- { r0*c0, r1*c0 }, s1 <- { r0*c1, r1*c1 } (8 bytes each).
static void vec_dot_q8x4x2_q8x4x2_2x2(const int n, float * restrict s0, float * restrict s1,
                                      const void * restrict vx0, const void * restrict vx1,
                                      const void * restrict vy0, const void * restrict vy1) {
    assert(n % 32 == 0); // min sub-block size
    assert((unsigned long) vx0 % 128 == 0); // HVX vector (128B) alignment
    assert((unsigned long) vx1 % 128 == 0);
    assert((unsigned long) vy0 % 128 == 0);
    assert((unsigned long) vy1 % 128 == 0);

    const uint32_t qk = QK_Q8_0x4x2 * 4; // elements per superblock

    const uint32_t x_dblk_size = 8 * 4 * 2; // 32x __fp16
    const uint32_t x_qblk_size = qk;        // int8
    const uint32_t x_qrow_size = n;         // int8 (not padded)

    const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16
    const uint32_t y_qblk_size = qk;        // int8
    const uint32_t y_qrow_size = n;         // int8 (not padded)

    const uint8_t * restrict r0_x_q = ((const uint8_t *) vx0) + 0;           // quants first
    const uint8_t * restrict r0_x_d = ((const uint8_t *) vx0) + x_qrow_size; // then scales
    const uint8_t * restrict r1_x_q = ((const uint8_t *) vx1) + 0;           // quants first
    const uint8_t * restrict r1_x_d = ((const uint8_t *) vx1) + x_qrow_size; // then scales

    const uint8_t * restrict y0_q = ((const uint8_t *) vy0) + 0;           // quants first
    const uint8_t * restrict y0_d = ((const uint8_t *) vy0) + y_qrow_size; // then scales
    const uint8_t * restrict y1_q = ((const uint8_t *) vy1) + 0;           // quants first
    const uint8_t * restrict y1_d = ((const uint8_t *) vy1) + y_qrow_size; // then scales

    // Row sums (sf) - 4 accumulators for 2×2 tile
    HVX_Vector r0_c0_sum = Q6_V_vzero();
    HVX_Vector r0_c1_sum = Q6_V_vzero();
    HVX_Vector r1_c0_sum = Q6_V_vzero();
    HVX_Vector r1_c1_sum = Q6_V_vzero();

    const uint32_t nb = n / qk;   // num full blocks
    const uint32_t nloe = n % qk; // num leftover elements
                                  // NOTE(review): siblings declare nloe as signed int32_t with a
                                  // "must be signed" comment; here it is unsigned. It is only
                                  // passed by value to the *_partial helpers (converted to their
                                  // parameter type), so this looks equivalent — confirm.

    uint32_t i = 0;
    for (; i < nb; i++) {
        // Load src1 columns (reused across both src0 rows)
        HVX_Vector_x8 vy0_q = hvx_vec_load_q8x4x8_full(y0_q + i * y_qblk_size);
        HVX_Vector_x8 vy1_q = hvx_vec_load_q8x4x8_full(y1_q + i * y_qblk_size);

        // Load src0 rows (reused across both src1 columns)
        HVX_Vector_x8 r0_q = hvx_vec_load_q8x4x8_full(r0_x_q + i * x_qblk_size);
        HVX_Vector_x8 r1_q = hvx_vec_load_q8x4x8_full(r1_x_q + i * x_qblk_size);

        // Compute 4 dot products: r0×c0, r0×c1, r1×c0, r1×c1 (int32 -> sf)
        HVX_Vector r0_c0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy0_q));
        HVX_Vector r0_c1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy1_q));
        HVX_Vector r1_c0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy0_q));
        HVX_Vector r1_c1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy1_q));

        // Load scales (fp16, shuffled for the widening multiply below)
        HVX_Vector vy0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y0_d + i * y_dblk_size));
        HVX_Vector vy1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y1_d + i * y_dblk_size));
        HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size));
        HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size));

        // Compute combined scales (fp16 x fp16 -> fp32)
        HVX_Vector r0_c0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy0_d)));
        HVX_Vector r0_c1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy1_d)));
        HVX_Vector r1_c0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy0_d)));
        HVX_Vector r1_c1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy1_d)));

        // Apply scales and accumulate
        HVX_Vector r0_c0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_c0_ia, r0_c0_dd);
        HVX_Vector r0_c1_fa = Q6_Vqf32_vmpy_VsfVsf(r0_c1_ia, r0_c1_dd);
        HVX_Vector r1_c0_fa = Q6_Vqf32_vmpy_VsfVsf(r1_c0_ia, r1_c0_dd);
        HVX_Vector r1_c1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_c1_ia, r1_c1_dd);

        r0_c0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_c0_fa, r0_c0_sum));
        r0_c1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_c1_fa, r0_c1_sum));
        r1_c0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_c0_fa, r1_c0_sum));
        r1_c1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_c1_fa, r1_c1_sum));
    }

    // Process leftovers (a final, partially filled superblock)
    if (nloe) {
        HVX_Vector_x8 vy0_q = hvx_vec_load_q8x4x8_partial(y0_q + i * y_qblk_size, nloe);
        HVX_Vector_x8 vy1_q = hvx_vec_load_q8x4x8_partial(y1_q + i * y_qblk_size, nloe);
        HVX_Vector_x8 r0_q = hvx_vec_load_q8x4x8_partial(r0_x_q + i * x_qblk_size, nloe);
        HVX_Vector_x8 r1_q = hvx_vec_load_q8x4x8_partial(r1_x_q + i * x_qblk_size, nloe);

        HVX_Vector r0_c0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r0_q, vy0_q, nloe));
        HVX_Vector r0_c1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r0_q, vy1_q, nloe));
        HVX_Vector r1_c0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r1_q, vy0_q, nloe));
        HVX_Vector r1_c1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r1_q, vy1_q, nloe));

        HVX_Vector vy0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y0_d + i * y_dblk_size));
        HVX_Vector vy1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (y1_d + i * y_dblk_size));
        HVX_Vector r0_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r0_x_d + i * x_dblk_size));
        HVX_Vector r1_d = Q6_Vh_vshuff_Vh(*(const HVX_UVector *) (r1_x_d + i * x_dblk_size));

        HVX_Vector r0_c0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy0_d)));
        HVX_Vector r0_c1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r0_d, vy1_d)));
        HVX_Vector r1_c0_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy0_d)));
        HVX_Vector r1_c1_dd = Q6_Vsf_equals_Vqf32(Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(r1_d, vy1_d)));

        // Zero out unused elements (nloe/8 valid 4-byte lanes)
        HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8);
        r0_c0_dd = Q6_V_vand_QV(bmask, r0_c0_dd);
        r0_c1_dd = Q6_V_vand_QV(bmask, r0_c1_dd);
        r1_c0_dd = Q6_V_vand_QV(bmask, r1_c0_dd);
        r1_c1_dd = Q6_V_vand_QV(bmask, r1_c1_dd);
        r0_c0_ia = Q6_V_vand_QV(bmask, r0_c0_ia);
        r0_c1_ia = Q6_V_vand_QV(bmask, r0_c1_ia);
        r1_c0_ia = Q6_V_vand_QV(bmask, r1_c0_ia);
        r1_c1_ia = Q6_V_vand_QV(bmask, r1_c1_ia);

        HVX_Vector r0_c0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_c0_ia, r0_c0_dd);
        HVX_Vector r0_c1_fa = Q6_Vqf32_vmpy_VsfVsf(r0_c1_ia, r0_c1_dd);
        HVX_Vector r1_c0_fa = Q6_Vqf32_vmpy_VsfVsf(r1_c0_ia, r1_c0_dd);
        HVX_Vector r1_c1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_c1_ia, r1_c1_dd);

        r0_c0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_c0_fa, r0_c0_sum));
        r0_c1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_c1_fa, r0_c1_sum));
        r1_c0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_c0_fa, r1_c0_sum));
        r1_c1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_c1_fa, r1_c1_sum));
    }

    // Reduce and store results
    HVX_Vector r0_r1_c0_sum = hvx_vec_reduce_sum_f32x2(r0_c0_sum, r1_c0_sum);
    HVX_Vector r0_r1_c1_sum = hvx_vec_reduce_sum_f32x2(r0_c1_sum, r1_c1_sum);

    hvx_vec_store_u(&s0[0], 8, r0_r1_c0_sum); // row0,col0 row1,col0
    hvx_vec_store_u(&s1[0], 8, r0_r1_c1_sum); // row0,col1 row1,col1
}
|
|
923
|
+
|
|
924
|
+
// Dot product of one mxfp4x4x2 row (vx0, fp4 quants with e8m0 block scales)
// with one q8x4x2 column (vy0, int8 quants with __fp16 block scales).
// The single float result is stored to s0.
// Operand memory layout (both sides): quants first, then the block scales;
// the fp4 side packs two values per byte (hence the /2 sizes below).
static void vec_dot_mxfp4x4x2_q8x4x2_1x1(const int n, float * restrict s0, const void * restrict vx0, const void * restrict vy0) {
    assert(n % 32 == 0); // min sub-block size
    assert((unsigned long) vx0 % 128 == 0); // HVX vector (128B) alignment
    assert((unsigned long) vy0 % 128 == 0);

    const uint32_t qk = QK_MXFP4x4x2 * 4; // elements per superblock

    const uint32_t x_dblk_size = 8 * 4 * 1; // 32x e8m0
    const uint32_t x_qblk_size = qk / 2;    // fp4
    const uint32_t x_qrow_size = n / 2;     // fp4 (not padded)

    const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16
    const uint32_t y_qblk_size = qk;        // int8
    const uint32_t y_qrow_size = n;         // int8 (not padded)

    const uint8_t * restrict r0_x_q = ((const uint8_t *) vx0 + 0);           // quants first
    const uint8_t * restrict r0_x_d = ((const uint8_t *) vx0 + x_qrow_size); // then scales

    const uint8_t * restrict y_q = ((const uint8_t *) vy0 + 0);           // quants first
    const uint8_t * restrict y_d = ((const uint8_t *) vy0 + y_qrow_size); // then scales

    // Row sum (sf)
    HVX_Vector r0_sum = Q6_V_vzero();

    // Per superblock:
    //   Multiply and accumulate into int32.
    //   Compute combined scale (fp32).
    //   Apply scale to acc and accumulate into the row sum (qf32).

    const uint32_t nb = n / qk; // num full blocks
    int32_t nloe = n % qk;      // num leftover elements (must be signed)

    uint32_t i = 0;
    for (; i < nb; i++) {
        HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8_full(    y_q + i * y_qblk_size);
        HVX_Vector_x8 r0_q = hvx_vec_load_mxfp4x4x8_full(r0_x_q + i * x_qblk_size); // fp4 expanded to int8 lanes

        HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q));

        HVX_Vector vy_d = *(const HVX_UVector *) (y_d + i * y_dblk_size);
        HVX_Vector r0_d = *(const HVX_UVector *) (r0_x_d + i * x_dblk_size);

        // Convert vy_d from fp16 to fp32 while applying 0.5 scaling which is used for e8m0 halving
        HVX_Vector half = Q6_Vh_vsplat_R(0x3800); // 0.5 in fp16
        vy_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy_d), half));
        vy_d = Q6_Vsf_equals_Vqf32(vy_d);

        // Convert rX_d scales from e8m0 to fp32:
        // Expand and zero-pad 32x uint8 e8m0 values to uint32s : 0 0 0 0, 0 0 0 1, 0 0 0 2, ...
        // then left-shift the 8-bit exponent into the fp32 exponent field (bit 23).
        // FIXME: might need to handle zero as a special case (see ggml-cpu code)
        HVX_Vector expand = *(const HVX_Vector *) expand_x32_e8m0;
        HVX_Vector e8m0_mask = Q6_V_vsplat_R(0x000000ff);
        r0_d = Q6_V_vdelta_VV(r0_d, expand);
        r0_d = Q6_V_vand_VV(r0_d, e8m0_mask);
        r0_d = Q6_Vw_vasl_VwR(r0_d, 23);

        // Combined scale (fp32)
        HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy_d));

        HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd);

        r0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_fa, r0_sum));
    }

    // Process leftovers (a final, partially filled superblock)
    if (nloe) {
        HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8_partial(    y_q + i * y_qblk_size, nloe);
        HVX_Vector_x8 r0_q = hvx_vec_load_mxfp4x4x8_partial(r0_x_q + i * x_qblk_size, nloe);

        HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r0_q, vy_q, nloe));

        HVX_Vector vy_d = *(const HVX_UVector *) (y_d + i * y_dblk_size);
        HVX_Vector r0_d = *(const HVX_UVector *) (r0_x_d + i * x_dblk_size);

        // Convert vy_d from fp16 to fp32 while applying 0.5 scaling which is used for e8m0 halving
        HVX_Vector half = Q6_Vh_vsplat_R(0x3800); // 0.5 in fp16
        vy_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy_d), half));
        vy_d = Q6_Vsf_equals_Vqf32(vy_d);

        // Convert rX_d scales from e8m0 to fp32 (same expand/mask/shift as the main loop)
        // FIXME: might need to handle zero as a special case (see ggml-cpu code)
        HVX_Vector expand = *(const HVX_Vector *) expand_x32_e8m0;
        HVX_Vector e8m0_mask = Q6_V_vsplat_R(0x000000ff);
        r0_d = Q6_V_vdelta_VV(r0_d, expand);
        r0_d = Q6_V_vand_VV(r0_d, e8m0_mask);
        r0_d = Q6_Vw_vasl_VwR(r0_d, 23);

        HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy_d));

        // Zero-out unused scales (nloe/8 valid 4-byte lanes)
        HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8);
        r0_dd = Q6_V_vand_QV(bmask, r0_dd);
        r0_ia = Q6_V_vand_QV(bmask, r0_ia);

        HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd);

        r0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_fa, r0_sum));
    }

    r0_sum = hvx_vec_reduce_sum_f32(r0_sum);

    hvx_vec_store_u(s0, 4, r0_sum);
}
|
|
1028
|
+
|
|
1029
|
+
static void vec_dot_mxfp4x4x2_q8x4x2_2x1(const int n, float * restrict s0,
|
|
1030
|
+
const void * restrict vx0, const void * restrict vx1,
|
|
1031
|
+
const void * restrict vy0) {
|
|
1032
|
+
assert(n % 32 == 0); // min sub-block size
|
|
1033
|
+
assert((unsigned long) vx0 % 128 == 0);
|
|
1034
|
+
assert((unsigned long) vx1 % 128 == 0);
|
|
1035
|
+
assert((unsigned long) vy0 % 128 == 0);
|
|
1036
|
+
|
|
1037
|
+
const uint32_t qk = QK_MXFP4x4x2 * 4;
|
|
1038
|
+
|
|
1039
|
+
const uint32_t x_dblk_size = 8 * 4 * 1; // 32x e8m0
|
|
1040
|
+
const uint32_t x_qblk_size = qk / 2; // fp4
|
|
1041
|
+
const uint32_t x_qrow_size = n / 2; // fp4 (not padded)
|
|
1042
|
+
|
|
1043
|
+
const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16
|
|
1044
|
+
const uint32_t y_qblk_size = qk; // int8
|
|
1045
|
+
const uint32_t y_qrow_size = n; // int8 (not padded)
|
|
1046
|
+
|
|
1047
|
+
const uint8_t * restrict r0_x_q = ((const uint8_t *) vx0) + 0; // quants first
|
|
1048
|
+
const uint8_t * restrict r0_x_d = ((const uint8_t *) vx0) + x_qrow_size; // then scales
|
|
1049
|
+
const uint8_t * restrict r1_x_q = ((const uint8_t *) vx1) + 0; // quants first
|
|
1050
|
+
const uint8_t * restrict r1_x_d = ((const uint8_t *) vx1) + x_qrow_size; // then scales
|
|
1051
|
+
|
|
1052
|
+
const uint8_t * restrict y_q = ((const uint8_t *) vy0) + 0; // quants first
|
|
1053
|
+
const uint8_t * restrict y_d = ((const uint8_t *) vy0) + y_qrow_size; // then scales
|
|
1054
|
+
|
|
1055
|
+
// Row sum (sf)
|
|
1056
|
+
HVX_Vector r0_sum = Q6_V_vzero();
|
|
1057
|
+
HVX_Vector r1_sum = Q6_V_vzero();
|
|
1058
|
+
|
|
1059
|
+
// Multiply and accumulate into int32.
|
|
1060
|
+
// Compute combined scale (fp32).
|
|
1061
|
+
// Apply scale to acc and accumulate into the row sum (f32).
|
|
1062
|
+
|
|
1063
|
+
const uint32_t nb = n / qk; // num full blocks
|
|
1064
|
+
int32_t nloe = n % qk; // num leftover elemements (must be signed)
|
|
1065
|
+
|
|
1066
|
+
uint32_t i = 0;
|
|
1067
|
+
for (; i < nb; i++) {
|
|
1068
|
+
HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8_full( y_q + i * y_qblk_size);
|
|
1069
|
+
HVX_Vector_x8 r0_q = hvx_vec_load_mxfp4x4x8_full(r0_x_q + i * x_qblk_size);
|
|
1070
|
+
HVX_Vector_x8 r1_q = hvx_vec_load_mxfp4x4x8_full(r1_x_q + i * x_qblk_size);
|
|
1071
|
+
|
|
1072
|
+
HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q));
|
|
1073
|
+
HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy_q));
|
|
1074
|
+
|
|
1075
|
+
HVX_Vector vy_d = *(const HVX_UVector *) (y_d + i * y_dblk_size);
|
|
1076
|
+
HVX_Vector r0_d = *(const HVX_UVector *) (r0_x_d + i * x_dblk_size);
|
|
1077
|
+
HVX_Vector r1_d = *(const HVX_UVector *) (r1_x_d + i * x_dblk_size);
|
|
1078
|
+
|
|
1079
|
+
// Convert vy_d from fp16 to fp32 while applying 0.5 scaling which is used for e8m0 halving
|
|
1080
|
+
HVX_Vector half = Q6_Vh_vsplat_R(0x3800); // 0.5 in fp16
|
|
1081
|
+
vy_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy_d), half));
|
|
1082
|
+
vy_d = Q6_Vsf_equals_Vqf32(vy_d);
|
|
1083
|
+
|
|
1084
|
+
// Convert rX_d scales from e8m0 to fp32
|
|
1085
|
+
// Expand and zero-pad 32x uint8 e8m0 values to uint32s : 0 0 0 0, 0 0 0 1, 0 0 0 2, ...
|
|
1086
|
+
// Left shift with zero fill to create FP32
|
|
1087
|
+
// FIXME: might need to handle zero as a special case (see ggml-cpu code)
|
|
1088
|
+
HVX_Vector expand = *(const HVX_Vector *) expand_x32_e8m0;
|
|
1089
|
+
HVX_Vector e8m0_mask = Q6_V_vsplat_R(0x000000ff);
|
|
1090
|
+
r0_d = Q6_V_vdelta_VV(r0_d, expand);
|
|
1091
|
+
r0_d = Q6_V_vand_VV(r0_d, e8m0_mask);
|
|
1092
|
+
r0_d = Q6_Vw_vasl_VwR(r0_d, 23);
|
|
1093
|
+
r1_d = Q6_V_vdelta_VV(r1_d, expand);
|
|
1094
|
+
r1_d = Q6_V_vand_VV(r1_d, e8m0_mask);
|
|
1095
|
+
r1_d = Q6_Vw_vasl_VwR(r1_d, 23);
|
|
1096
|
+
|
|
1097
|
+
HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy_d));
|
|
1098
|
+
HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r1_d, vy_d));
|
|
1099
|
+
|
|
1100
|
+
HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd);
|
|
1101
|
+
HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd);
|
|
1102
|
+
|
|
1103
|
+
r0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_fa, r0_sum));
|
|
1104
|
+
r1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_fa, r1_sum));
|
|
1105
|
+
}
|
|
1106
|
+
|
|
1107
|
+
// Process leftovers
|
|
1108
|
+
if (nloe) {
|
|
1109
|
+
HVX_Vector_x8 vy_q = hvx_vec_load_q8x4x8_partial( y_q + i * y_qblk_size, nloe);
|
|
1110
|
+
HVX_Vector_x8 r0_q = hvx_vec_load_mxfp4x4x8_partial(r0_x_q + i * x_qblk_size, nloe);
|
|
1111
|
+
HVX_Vector_x8 r1_q = hvx_vec_load_mxfp4x4x8_partial(r1_x_q + i * x_qblk_size, nloe);
|
|
1112
|
+
|
|
1113
|
+
HVX_Vector r0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy_q));
|
|
1114
|
+
HVX_Vector r1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy_q));
|
|
1115
|
+
|
|
1116
|
+
HVX_Vector vy_d = *(const HVX_UVector *) (y_d + i * y_dblk_size);
|
|
1117
|
+
HVX_Vector r0_d = *(const HVX_UVector *) (r0_x_d + i * x_dblk_size);
|
|
1118
|
+
HVX_Vector r1_d = *(const HVX_UVector *) (r1_x_d + i * x_dblk_size);
|
|
1119
|
+
|
|
1120
|
+
// Convert vy_d from fp16 to fp32 while applying 0.5 scaling which is used for e8m0 halving
|
|
1121
|
+
HVX_Vector half = Q6_Vh_vsplat_R(0x3800); // 0.5 in fp16
|
|
1122
|
+
vy_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy_d), half));
|
|
1123
|
+
vy_d = Q6_Vsf_equals_Vqf32(vy_d);
|
|
1124
|
+
|
|
1125
|
+
// Convert rX_d scales from e8m0 to fp32
|
|
1126
|
+
// Expand and zero-pad 32x uint8 e8m0 values to uint32s : 0 0 0 0, 0 0 0 1, 0 0 0 2, ...
|
|
1127
|
+
// Left shift with zero fill to create FP32
|
|
1128
|
+
// FIXME: might need to handle zero as a special case (see ggml-cpu code)
|
|
1129
|
+
HVX_Vector expand = *(const HVX_Vector *) expand_x32_e8m0;
|
|
1130
|
+
HVX_Vector e8m0_mask = Q6_V_vsplat_R(0x000000ff);
|
|
1131
|
+
r0_d = Q6_V_vdelta_VV(r0_d, expand);
|
|
1132
|
+
r0_d = Q6_V_vand_VV(r0_d, e8m0_mask);
|
|
1133
|
+
r0_d = Q6_Vw_vasl_VwR(r0_d, 23);
|
|
1134
|
+
r1_d = Q6_V_vdelta_VV(r1_d, expand);
|
|
1135
|
+
r1_d = Q6_V_vand_VV(r1_d, e8m0_mask);
|
|
1136
|
+
r1_d = Q6_Vw_vasl_VwR(r1_d, 23);
|
|
1137
|
+
|
|
1138
|
+
HVX_Vector r0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy_d));
|
|
1139
|
+
HVX_Vector r1_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r1_d, vy_d));
|
|
1140
|
+
|
|
1141
|
+
// Zero-out unused values
|
|
1142
|
+
HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8);
|
|
1143
|
+
r0_dd = Q6_V_vand_QV(bmask, r0_dd);
|
|
1144
|
+
r1_dd = Q6_V_vand_QV(bmask, r1_dd);
|
|
1145
|
+
r0_ia = Q6_V_vand_QV(bmask, r0_ia);
|
|
1146
|
+
r1_ia = Q6_V_vand_QV(bmask, r1_ia);
|
|
1147
|
+
|
|
1148
|
+
HVX_Vector r0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_ia, r0_dd);
|
|
1149
|
+
HVX_Vector r1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_ia, r1_dd);
|
|
1150
|
+
|
|
1151
|
+
r0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_fa, r0_sum));
|
|
1152
|
+
r1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_fa, r1_sum));
|
|
1153
|
+
}
|
|
1154
|
+
|
|
1155
|
+
HVX_Vector rsum = hvx_vec_reduce_sum_f32x2(r0_sum, r1_sum);
|
|
1156
|
+
hvx_vec_store_u(s0, 8, rsum);
|
|
1157
|
+
}
|
|
1158
|
+
|
|
1159
|
+
static void vec_dot_mxfp4x4x2_q8x4x2_2x2(const int n, float * restrict s0, float * restrict s1,
|
|
1160
|
+
const void * restrict vx0, const void * restrict vx1,
|
|
1161
|
+
const void * restrict vy0, const void * restrict vy1) {
|
|
1162
|
+
assert(n % 32 == 0);
|
|
1163
|
+
assert((unsigned long) vx0 % 128 == 0);
|
|
1164
|
+
assert((unsigned long) vx1 % 128 == 0);
|
|
1165
|
+
assert((unsigned long) vy0 % 128 == 0);
|
|
1166
|
+
assert((unsigned long) vy1 % 128 == 0);
|
|
1167
|
+
|
|
1168
|
+
const uint32_t qk = QK_MXFP4x4x2 * 4;
|
|
1169
|
+
|
|
1170
|
+
const uint32_t x_dblk_size = 8 * 4 * 1; // 32x e8m0
|
|
1171
|
+
const uint32_t x_qblk_size = qk / 2; // fp4
|
|
1172
|
+
const uint32_t x_qrow_size = n / 2; // fp4 (not padded)
|
|
1173
|
+
|
|
1174
|
+
const uint32_t y_dblk_size = 8 * 4 * 2; // 32x __fp16
|
|
1175
|
+
const uint32_t y_qblk_size = qk; // int8
|
|
1176
|
+
const uint32_t y_qrow_size = n; // int8 (not padded)
|
|
1177
|
+
|
|
1178
|
+
const uint8_t * restrict r0_x_q = ((const uint8_t *) vx0) + 0; // quants first
|
|
1179
|
+
const uint8_t * restrict r0_x_d = ((const uint8_t *) vx0) + x_qrow_size; // then scales
|
|
1180
|
+
const uint8_t * restrict r1_x_q = ((const uint8_t *) vx1) + 0; // quants first
|
|
1181
|
+
const uint8_t * restrict r1_x_d = ((const uint8_t *) vx1) + x_qrow_size; // then scales
|
|
1182
|
+
|
|
1183
|
+
const uint8_t * restrict y0_q = ((const uint8_t *) vy0) + 0; // quants first
|
|
1184
|
+
const uint8_t * restrict y0_d = ((const uint8_t *) vy0) + y_qrow_size; // then scales
|
|
1185
|
+
const uint8_t * restrict y1_q = ((const uint8_t *) vy1) + 0; // quants first
|
|
1186
|
+
const uint8_t * restrict y1_d = ((const uint8_t *) vy1) + y_qrow_size; // then scales
|
|
1187
|
+
|
|
1188
|
+
// Row sums (sf) - 4 accumulators for 2×2 tile
|
|
1189
|
+
HVX_Vector r0_c0_sum = Q6_V_vzero();
|
|
1190
|
+
HVX_Vector r0_c1_sum = Q6_V_vzero();
|
|
1191
|
+
HVX_Vector r1_c0_sum = Q6_V_vzero();
|
|
1192
|
+
HVX_Vector r1_c1_sum = Q6_V_vzero();
|
|
1193
|
+
|
|
1194
|
+
const uint32_t nb = n / qk; // num full blocks
|
|
1195
|
+
const uint32_t nloe = n % qk; // num leftover elements
|
|
1196
|
+
|
|
1197
|
+
uint32_t i = 0;
|
|
1198
|
+
for (; i < nb; i++) {
|
|
1199
|
+
// Load src1 columns (reused across both src0 rows)
|
|
1200
|
+
HVX_Vector_x8 vy0_q = hvx_vec_load_q8x4x8_full(y0_q + i * y_qblk_size);
|
|
1201
|
+
HVX_Vector_x8 vy1_q = hvx_vec_load_q8x4x8_full(y1_q + i * y_qblk_size);
|
|
1202
|
+
|
|
1203
|
+
// Load src0 rows (reused across both src1 columns)
|
|
1204
|
+
HVX_Vector_x8 r0_q = hvx_vec_load_mxfp4x4x8_full(r0_x_q + i * x_qblk_size);
|
|
1205
|
+
HVX_Vector_x8 r1_q = hvx_vec_load_mxfp4x4x8_full(r1_x_q + i * x_qblk_size);
|
|
1206
|
+
|
|
1207
|
+
// Compute 4 dot products: r0×c0, r0×c1, r1×c0, r1×c1
|
|
1208
|
+
HVX_Vector r0_c0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy0_q));
|
|
1209
|
+
HVX_Vector r0_c1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r0_q, vy1_q));
|
|
1210
|
+
HVX_Vector r1_c0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy0_q));
|
|
1211
|
+
HVX_Vector r1_c1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_full(r1_q, vy1_q));
|
|
1212
|
+
|
|
1213
|
+
// Load scales
|
|
1214
|
+
HVX_Vector vy0_d = *(const HVX_UVector *) (y0_d + i * y_dblk_size);
|
|
1215
|
+
HVX_Vector vy1_d = *(const HVX_UVector *) (y1_d + i * y_dblk_size);
|
|
1216
|
+
HVX_Vector r0_d = *(const HVX_UVector *) (r0_x_d + i * x_dblk_size);
|
|
1217
|
+
HVX_Vector r1_d = *(const HVX_UVector *) (r1_x_d + i * x_dblk_size);
|
|
1218
|
+
|
|
1219
|
+
// Convert vy_d from fp16 to fp32 while applying 0.5 scaling which is used for e8m0 halving
|
|
1220
|
+
HVX_Vector half = Q6_Vh_vsplat_R(0x3800); // 0.5 in fp16
|
|
1221
|
+
vy0_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy0_d), half));
|
|
1222
|
+
vy0_d = Q6_Vsf_equals_Vqf32(vy0_d);
|
|
1223
|
+
vy1_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy1_d), half));
|
|
1224
|
+
vy1_d = Q6_Vsf_equals_Vqf32(vy1_d);
|
|
1225
|
+
|
|
1226
|
+
// Convert rX_d scales from e8m0 to fp32
|
|
1227
|
+
// Expand and zero-pad 32x uint8 e8m0 values to uint32s : 0 0 0 0, 0 0 0 1, 0 0 0 2, ...
|
|
1228
|
+
// Left shift with zero fill to create FP32
|
|
1229
|
+
// FIXME: might need to handle zero as a special case (see ggml-cpu code)
|
|
1230
|
+
HVX_Vector expand = *(const HVX_Vector *) expand_x32_e8m0;
|
|
1231
|
+
HVX_Vector e8m0_mask = Q6_V_vsplat_R(0x000000ff);
|
|
1232
|
+
r0_d = Q6_V_vdelta_VV(r0_d, expand);
|
|
1233
|
+
r0_d = Q6_V_vand_VV(r0_d, e8m0_mask);
|
|
1234
|
+
r0_d = Q6_Vw_vasl_VwR(r0_d, 23);
|
|
1235
|
+
r1_d = Q6_V_vdelta_VV(r1_d, expand);
|
|
1236
|
+
r1_d = Q6_V_vand_VV(r1_d, e8m0_mask);
|
|
1237
|
+
r1_d = Q6_Vw_vasl_VwR(r1_d, 23);
|
|
1238
|
+
|
|
1239
|
+
// Compute combined scales
|
|
1240
|
+
HVX_Vector r0_c0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy0_d));
|
|
1241
|
+
HVX_Vector r0_c1_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy1_d));
|
|
1242
|
+
HVX_Vector r1_c0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r1_d, vy0_d));
|
|
1243
|
+
HVX_Vector r1_c1_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r1_d, vy1_d));
|
|
1244
|
+
|
|
1245
|
+
// Apply scales and accumulate
|
|
1246
|
+
HVX_Vector r0_c0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_c0_ia, r0_c0_dd);
|
|
1247
|
+
HVX_Vector r0_c1_fa = Q6_Vqf32_vmpy_VsfVsf(r0_c1_ia, r0_c1_dd);
|
|
1248
|
+
HVX_Vector r1_c0_fa = Q6_Vqf32_vmpy_VsfVsf(r1_c0_ia, r1_c0_dd);
|
|
1249
|
+
HVX_Vector r1_c1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_c1_ia, r1_c1_dd);
|
|
1250
|
+
|
|
1251
|
+
r0_c0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_c0_fa, r0_c0_sum));
|
|
1252
|
+
r0_c1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_c1_fa, r0_c1_sum));
|
|
1253
|
+
r1_c0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_c0_fa, r1_c0_sum));
|
|
1254
|
+
r1_c1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_c1_fa, r1_c1_sum));
|
|
1255
|
+
}
|
|
1256
|
+
|
|
1257
|
+
// Process leftovers
|
|
1258
|
+
if (nloe) {
|
|
1259
|
+
HVX_Vector_x8 vy0_q = hvx_vec_load_q8x4x8_partial( y0_q + i * y_qblk_size, nloe);
|
|
1260
|
+
HVX_Vector_x8 vy1_q = hvx_vec_load_q8x4x8_partial( y1_q + i * y_qblk_size, nloe);
|
|
1261
|
+
HVX_Vector_x8 r0_q = hvx_vec_load_mxfp4x4x8_partial(r0_x_q + i * x_qblk_size, nloe);
|
|
1262
|
+
HVX_Vector_x8 r1_q = hvx_vec_load_mxfp4x4x8_partial(r1_x_q + i * x_qblk_size, nloe);
|
|
1263
|
+
|
|
1264
|
+
HVX_Vector r0_c0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r0_q, vy0_q, nloe));
|
|
1265
|
+
HVX_Vector r0_c1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r0_q, vy1_q, nloe));
|
|
1266
|
+
HVX_Vector r1_c0_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r1_q, vy0_q, nloe));
|
|
1267
|
+
HVX_Vector r1_c1_ia = Q6_Vsf_equals_Vw(hvx_vec_rmpy_x8_partial(r1_q, vy1_q, nloe));
|
|
1268
|
+
|
|
1269
|
+
HVX_Vector vy0_d = *(const HVX_UVector *) (y0_d + i * y_dblk_size);
|
|
1270
|
+
HVX_Vector vy1_d = *(const HVX_UVector *) (y1_d + i * y_dblk_size);
|
|
1271
|
+
HVX_Vector r0_d = *(const HVX_UVector *) (r0_x_d + i * x_dblk_size);
|
|
1272
|
+
HVX_Vector r1_d = *(const HVX_UVector *) (r1_x_d + i * x_dblk_size);
|
|
1273
|
+
|
|
1274
|
+
// Convert vy_d from fp16 to fp32 while applying 0.5 scaling which is used for e8m0 halving
|
|
1275
|
+
HVX_Vector half = Q6_Vh_vsplat_R(0x3800); // 0.5 in fp16
|
|
1276
|
+
vy0_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy0_d), half));
|
|
1277
|
+
vy0_d = Q6_Vsf_equals_Vqf32(vy0_d);
|
|
1278
|
+
vy1_d = Q6_V_lo_W(Q6_Wqf32_vmpy_VhfVhf(Q6_Vh_vshuff_Vh(vy1_d), half));
|
|
1279
|
+
vy1_d = Q6_Vsf_equals_Vqf32(vy1_d);
|
|
1280
|
+
|
|
1281
|
+
// Convert rX_d scales from e8m0 to fp32
|
|
1282
|
+
// Expand and zero-pad 32x uint8 e8m0 values to uint32s : 0 0 0 0, 0 0 0 1, 0 0 0 2, ...
|
|
1283
|
+
// Left shift with zero fill to create FP32
|
|
1284
|
+
// FIXME: might need to handle zero as a special case (see ggml-cpu code)
|
|
1285
|
+
HVX_Vector expand = *(const HVX_Vector *) expand_x32_e8m0;
|
|
1286
|
+
HVX_Vector e8m0_mask = Q6_V_vsplat_R(0x000000ff);
|
|
1287
|
+
r0_d = Q6_V_vdelta_VV(r0_d, expand);
|
|
1288
|
+
r0_d = Q6_V_vand_VV(r0_d, e8m0_mask);
|
|
1289
|
+
r0_d = Q6_Vw_vasl_VwR(r0_d, 23);
|
|
1290
|
+
r1_d = Q6_V_vdelta_VV(r1_d, expand);
|
|
1291
|
+
r1_d = Q6_V_vand_VV(r1_d, e8m0_mask);
|
|
1292
|
+
r1_d = Q6_Vw_vasl_VwR(r1_d, 23);
|
|
1293
|
+
|
|
1294
|
+
HVX_Vector r0_c0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy0_d));
|
|
1295
|
+
HVX_Vector r0_c1_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r0_d, vy1_d));
|
|
1296
|
+
HVX_Vector r1_c0_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r1_d, vy0_d));
|
|
1297
|
+
HVX_Vector r1_c1_dd = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(r1_d, vy1_d));
|
|
1298
|
+
|
|
1299
|
+
// Zero out unused scales
|
|
1300
|
+
HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe / 8);
|
|
1301
|
+
r0_c0_dd = Q6_V_vand_QV(bmask, r0_c0_dd);
|
|
1302
|
+
r0_c1_dd = Q6_V_vand_QV(bmask, r0_c1_dd);
|
|
1303
|
+
r1_c0_dd = Q6_V_vand_QV(bmask, r1_c0_dd);
|
|
1304
|
+
r1_c1_dd = Q6_V_vand_QV(bmask, r1_c1_dd);
|
|
1305
|
+
r0_c0_ia = Q6_V_vand_QV(bmask, r0_c0_ia);
|
|
1306
|
+
r0_c1_ia = Q6_V_vand_QV(bmask, r0_c1_ia);
|
|
1307
|
+
r1_c0_ia = Q6_V_vand_QV(bmask, r1_c0_ia);
|
|
1308
|
+
r1_c1_ia = Q6_V_vand_QV(bmask, r1_c1_ia);
|
|
1309
|
+
|
|
1310
|
+
HVX_Vector r0_c0_fa = Q6_Vqf32_vmpy_VsfVsf(r0_c0_ia, r0_c0_dd);
|
|
1311
|
+
HVX_Vector r0_c1_fa = Q6_Vqf32_vmpy_VsfVsf(r0_c1_ia, r0_c1_dd);
|
|
1312
|
+
HVX_Vector r1_c0_fa = Q6_Vqf32_vmpy_VsfVsf(r1_c0_ia, r1_c0_dd);
|
|
1313
|
+
HVX_Vector r1_c1_fa = Q6_Vqf32_vmpy_VsfVsf(r1_c1_ia, r1_c1_dd);
|
|
1314
|
+
|
|
1315
|
+
r0_c0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_c0_fa, r0_c0_sum));
|
|
1316
|
+
r0_c1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r0_c1_fa, r0_c1_sum));
|
|
1317
|
+
r1_c0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_c0_fa, r1_c0_sum));
|
|
1318
|
+
r1_c1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_Vqf32Vsf(r1_c1_fa, r1_c1_sum));
|
|
1319
|
+
}
|
|
1320
|
+
|
|
1321
|
+
// Reduce and store results
|
|
1322
|
+
HVX_Vector r0_r1_c0_sum = hvx_vec_reduce_sum_f32x2(r0_c0_sum, r1_c0_sum);
|
|
1323
|
+
HVX_Vector r0_r1_c1_sum = hvx_vec_reduce_sum_f32x2(r0_c1_sum, r1_c1_sum);
|
|
1324
|
+
|
|
1325
|
+
hvx_vec_store_u(&s0[0], 8, r0_r1_c0_sum); // row0,col0 row1,col0
|
|
1326
|
+
hvx_vec_store_u(&s1[0], 8, r0_r1_c1_sum); // row0,col1 row1,col1
|
|
1327
|
+
}
|
|
1328
|
+
|
|
1329
|
+
static void vec_dot_f16_f16_aa_1x1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
|
|
1330
|
+
const HVX_Vector * restrict x = (const HVX_Vector *) vx;
|
|
1331
|
+
const HVX_Vector * restrict y = (const HVX_Vector *) vy;
|
|
1332
|
+
|
|
1333
|
+
uint32_t nvec = n / VLEN_FP16; // num full fp16 hvx vectors
|
|
1334
|
+
uint32_t nloe = n % VLEN_FP16; // leftover elements
|
|
1335
|
+
|
|
1336
|
+
HVX_VectorPair rsum_p = Q6_W_vzero();
|
|
1337
|
+
|
|
1338
|
+
uint32_t i = 0;
|
|
1339
|
+
|
|
1340
|
+
#pragma unroll(4)
|
|
1341
|
+
for (i = 0; i < nvec; i++) {
|
|
1342
|
+
rsum_p = hvx_vec_mpyacc_f32_f16(rsum_p, x[i], y[i]);
|
|
1343
|
+
}
|
|
1344
|
+
|
|
1345
|
+
if (nloe) {
|
|
1346
|
+
HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe * 2);
|
|
1347
|
+
HVX_Vector x_hf = Q6_V_vand_QV(bmask, x[i]);
|
|
1348
|
+
HVX_Vector y_hf = Q6_V_vand_QV(bmask, y[i]);
|
|
1349
|
+
rsum_p = hvx_vec_mpyacc_f32_f16(rsum_p, x_hf, y_hf);
|
|
1350
|
+
}
|
|
1351
|
+
|
|
1352
|
+
HVX_Vector rsum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_VsfVsf(Q6_V_lo_W(rsum_p), Q6_V_hi_W(rsum_p)));
|
|
1353
|
+
hvx_vec_store_u(s, 4, hvx_vec_reduce_sum_f32(rsum));
|
|
1354
|
+
}
|
|
1355
|
+
|
|
1356
|
+
// Dot products of two aligned fp16 rows against a single aligned fp16 column.
// n        : number of fp16 elements per row/column
// s0       : output, two consecutive fp32 values (8 bytes): {x0.y, x1.y}
// vx0, vx1 : fp16 rows, HVX-vector aligned
// vy0      : fp16 column, HVX-vector aligned (loaded once, reused for both rows)
static void vec_dot_f16_f16_aa_2x1(const int n, float * restrict s0,
                                   const void * restrict vx0, const void * restrict vx1,
                                   const void * restrict vy0) {
    const HVX_Vector * restrict x0 = (const HVX_Vector *) vx0;
    const HVX_Vector * restrict x1 = (const HVX_Vector *) vx1;
    const HVX_Vector * restrict y = (const HVX_Vector *) vy0;

    uint32_t nvec = n / VLEN_FP16; // num full fp16 hvx vectors
    uint32_t nloe = n % VLEN_FP16; // leftover elements

    // One widened (qf32 pair) accumulator per row
    HVX_VectorPair rsum0_p = Q6_W_vzero();
    HVX_VectorPair rsum1_p = Q6_W_vzero();

    uint32_t i = 0;

#pragma unroll(2)
    for (i = 0; i < nvec; i++) {
        // The column vector is loaded once and shared by both row products
        HVX_Vector y_hf = y[i];
        rsum0_p = hvx_vec_mpyacc_f32_f16(rsum0_p, x0[i], y_hf);
        rsum1_p = hvx_vec_mpyacc_f32_f16(rsum1_p, x1[i], y_hf);
    }

    if (nloe) {
        // Zero the bytes past the last valid element (nloe fp16 == nloe*2 bytes)
        HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe * 2);
        HVX_Vector y_hf = Q6_V_vand_QV(bmask, y[i]);
        HVX_Vector x0_hf = Q6_V_vand_QV(bmask, x0[i]);
        HVX_Vector x1_hf = Q6_V_vand_QV(bmask, x1[i]);
        rsum0_p = hvx_vec_mpyacc_f32_f16(rsum0_p, x0_hf, y_hf);
        rsum1_p = hvx_vec_mpyacc_f32_f16(rsum1_p, x1_hf, y_hf);
    }

    // Fold each qf32 pair into a single sf vector
    HVX_Vector rsum0 = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_VsfVsf(Q6_V_lo_W(rsum0_p), Q6_V_hi_W(rsum0_p)));
    HVX_Vector rsum1 = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_VsfVsf(Q6_V_lo_W(rsum1_p), Q6_V_hi_W(rsum1_p)));
    // Reduce both sums at once; the two scalars land in the first 8 bytes
    HVX_Vector rsum = hvx_vec_reduce_sum_f32x2(rsum0, rsum1);
    hvx_vec_store_u(s0, 8, rsum);
}
|
|
1392
|
+
|
|
1393
|
+
// 2x2 tile of dot products: two aligned fp16 rows x two aligned fp16 columns.
// n        : number of fp16 elements per row/column
// s0       : output, {row0.col0, row1.col0} as two fp32 (8 bytes)
// s1       : output, {row0.col1, row1.col1} as two fp32 (8 bytes)
// vx0, vx1 : fp16 rows, HVX-vector aligned
// vy0, vy1 : fp16 columns, HVX-vector aligned
// Each loaded row/column vector is reused across two products, halving loads
// relative to four independent 1x1 calls.
static void vec_dot_f16_f16_aa_2x2(const int n, float * restrict s0, float * restrict s1,
                                   const void * restrict vx0, const void * restrict vx1,
                                   const void * restrict vy0, const void * restrict vy1) {
    const HVX_Vector * restrict x0 = (const HVX_Vector *) vx0;
    const HVX_Vector * restrict x1 = (const HVX_Vector *) vx1;
    const HVX_Vector * restrict y0 = (const HVX_Vector *) vy0;
    const HVX_Vector * restrict y1 = (const HVX_Vector *) vy1;

    uint32_t nvec = n / VLEN_FP16; // num full fp16 hvx vectors
    uint32_t nloe = n % VLEN_FP16; // leftover elements

    // Row sums (sf) - 4 accumulators for 2×2 tile
    HVX_VectorPair r0_c0_sum_p = Q6_W_vzero();
    HVX_VectorPair r0_c1_sum_p = Q6_W_vzero();
    HVX_VectorPair r1_c0_sum_p = Q6_W_vzero();
    HVX_VectorPair r1_c1_sum_p = Q6_W_vzero();

    uint32_t i = 0;

#pragma unroll(2)
    for (i = 0; i < nvec; i++) {
        HVX_Vector r0_hf = x0[i];
        HVX_Vector r1_hf = x1[i];
        HVX_Vector c0_hf = y0[i];
        HVX_Vector c1_hf = y1[i];

        // Compute 4 dot products: r0×c0, r0×c1, r1×c0, r1×c1
        r0_c0_sum_p = hvx_vec_mpyacc_f32_f16(r0_c0_sum_p, r0_hf, c0_hf);
        r0_c1_sum_p = hvx_vec_mpyacc_f32_f16(r0_c1_sum_p, r0_hf, c1_hf);
        r1_c0_sum_p = hvx_vec_mpyacc_f32_f16(r1_c0_sum_p, r1_hf, c0_hf);
        r1_c1_sum_p = hvx_vec_mpyacc_f32_f16(r1_c1_sum_p, r1_hf, c1_hf);
    }

    if (nloe) {
        // Zero bytes past the last valid element (nloe fp16 == nloe*2 bytes)
        HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe * 2);

        HVX_Vector r0_hf = Q6_V_vand_QV(bmask, x0[i]);
        HVX_Vector r1_hf = Q6_V_vand_QV(bmask, x1[i]);
        HVX_Vector c0_hf = Q6_V_vand_QV(bmask, y0[i]);
        HVX_Vector c1_hf = Q6_V_vand_QV(bmask, y1[i]);

        r0_c0_sum_p = hvx_vec_mpyacc_f32_f16(r0_c0_sum_p, r0_hf, c0_hf);
        r0_c1_sum_p = hvx_vec_mpyacc_f32_f16(r0_c1_sum_p, r0_hf, c1_hf);
        r1_c0_sum_p = hvx_vec_mpyacc_f32_f16(r1_c0_sum_p, r1_hf, c0_hf);
        r1_c1_sum_p = hvx_vec_mpyacc_f32_f16(r1_c1_sum_p, r1_hf, c1_hf);
    }

    // Fold each qf32 accumulator pair into a single sf vector
    HVX_Vector r0_c0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_VsfVsf(Q6_V_lo_W(r0_c0_sum_p), Q6_V_hi_W(r0_c0_sum_p)));
    HVX_Vector r0_c1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_VsfVsf(Q6_V_lo_W(r0_c1_sum_p), Q6_V_hi_W(r0_c1_sum_p)));
    HVX_Vector r1_c0_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_VsfVsf(Q6_V_lo_W(r1_c0_sum_p), Q6_V_hi_W(r1_c0_sum_p)));
    HVX_Vector r1_c1_sum = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vadd_VsfVsf(Q6_V_lo_W(r1_c1_sum_p), Q6_V_hi_W(r1_c1_sum_p)));

    // Reduce and store results
    HVX_Vector r0_r1_c0_sum = hvx_vec_reduce_sum_f32x2(r0_c0_sum, r1_c0_sum);
    HVX_Vector r0_r1_c1_sum = hvx_vec_reduce_sum_f32x2(r0_c1_sum, r1_c1_sum);

    hvx_vec_store_u(&s0[0], 8, r0_r1_c0_sum); // row0,col0 row1,col0
    hvx_vec_store_u(&s1[0], 8, r0_r1_c1_sum); // row0,col1 row1,col1
}
|
|
1452
|
+
|
|
1453
|
+
static void vec_dot_f16_f16_uu_1x1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
|
|
1454
|
+
const HVX_UVector * restrict x = (const HVX_UVector *) vx;
|
|
1455
|
+
const HVX_UVector * restrict y = (const HVX_UVector *) vy;
|
|
1456
|
+
|
|
1457
|
+
uint32_t nvec = n / VLEN_FP16; // num full fp16 hvx vectors
|
|
1458
|
+
uint32_t nloe = n % VLEN_FP16; // leftover elements
|
|
1459
|
+
|
|
1460
|
+
HVX_Vector rsum = Q6_V_vzero();
|
|
1461
|
+
|
|
1462
|
+
uint32_t i = 0;
|
|
1463
|
+
|
|
1464
|
+
#pragma unroll(4)
|
|
1465
|
+
for (i = 0; i < nvec; i++) {
|
|
1466
|
+
HVX_VectorPair xy_qf = Q6_Wqf32_vmpy_VhfVhf(x[i], y[i]);
|
|
1467
|
+
rsum = Q6_Vqf32_vadd_Vqf32Vqf32(rsum, Q6_Vqf32_vadd_Vqf32Vqf32(Q6_V_lo_W(xy_qf), Q6_V_hi_W(xy_qf)));
|
|
1468
|
+
}
|
|
1469
|
+
|
|
1470
|
+
if (nloe) {
|
|
1471
|
+
HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe * 2);
|
|
1472
|
+
HVX_Vector x_hf = Q6_V_vand_QV(bmask, x[i]);
|
|
1473
|
+
HVX_Vector y_hf = Q6_V_vand_QV(bmask, y[i]);
|
|
1474
|
+
|
|
1475
|
+
HVX_VectorPair xy_qf = Q6_Wqf32_vmpy_VhfVhf(x_hf, y_hf);
|
|
1476
|
+
rsum = Q6_Vqf32_vadd_Vqf32Vqf32(rsum, Q6_Vqf32_vadd_Vqf32Vqf32(Q6_V_lo_W(xy_qf), Q6_V_hi_W(xy_qf)));
|
|
1477
|
+
}
|
|
1478
|
+
|
|
1479
|
+
rsum = hvx_vec_reduce_sum_f32(Q6_Vsf_equals_Vqf32(rsum));
|
|
1480
|
+
hvx_vec_store_u(&s[0], 4, rsum);
|
|
1481
|
+
}
|
|
1482
|
+
|
|
1483
|
+
// Mixed-precision dot product: fp16 row (x) against fp32 row (y), both with no
// alignment requirement. The fp32 side is converted to fp16 on the fly (two
// fp32 vectors -> one fp16 vector), then the fp16 x fp16 path is used.
// n : number of elements; s : output, a single fp32 value (4 bytes written)
static void vec_dot_f16_f32_uu_1x1(const int n, float * restrict s, const void * restrict x, const void * restrict y) {
    const HVX_UVector * restrict vx = (const HVX_UVector * restrict) x;
    const HVX_UVector * restrict vy = (const HVX_UVector * restrict) y;

    uint32_t nvec = n / VLEN_FP16; // num full fp16 hvx vectors
    uint32_t nloe = n % VLEN_FP16; // leftover elements

    const HVX_Vector zero = Q6_V_vzero();

    HVX_Vector rsum = Q6_V_vzero(); // running qf32 sum

    uint32_t i = 0;

#pragma unroll(2)
    for (i = 0; i < nvec; i++) {
        // Load y (fp32) and convert into fp16
        // (vsub with zero reformats sf -> qf32 so the pair can be narrowed to hf)
        HVX_Vector y0_qf = Q6_Vqf32_vsub_VsfVsf(vy[i*2+0], zero); // 32 elements
        HVX_Vector y1_qf = Q6_Vqf32_vsub_VsfVsf(vy[i*2+1], zero); // 32 elements
        // vdeal restores element order after the pairwise narrowing conversion
        HVX_Vector y_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(y1_qf, y0_qf)));

        // Load x (fp16)
        HVX_Vector x_hf = vx[i];

        HVX_VectorPair xy_qf = Q6_Wqf32_vmpy_VhfVhf(x_hf, y_hf);

        rsum = Q6_Vqf32_vadd_Vqf32Vqf32(rsum, Q6_Vqf32_vadd_Vqf32Vqf32(Q6_V_lo_W(xy_qf), Q6_V_hi_W(xy_qf)));
    }

    if (nloe) {
        // Load y (fp32) and convert into fp16
        HVX_Vector y0_qf = Q6_Vqf32_vsub_VsfVsf(vy[i*2+0], zero); // 32 elements
        HVX_Vector y1_qf = Q6_Vqf32_vsub_VsfVsf(vy[i*2+1], zero); // 32 elements
        HVX_Vector y_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(y1_qf, y0_qf)));

        // Load x (fp16)
        HVX_Vector x_hf = vx[i];

        // Zero-out unused elements
        // Note that we need to clear both x and y because they may contain NANs
        HVX_VectorPred bmask = Q6_Q_vsetq_R(nloe * 2);
        x_hf = Q6_V_vand_QV(bmask, x_hf);
        y_hf = Q6_V_vand_QV(bmask, y_hf);

        HVX_VectorPair xy_qf = Q6_Wqf32_vmpy_VhfVhf(x_hf, y_hf);

        rsum = Q6_Vqf32_vadd_Vqf32Vqf32(rsum, Q6_Vqf32_vadd_Vqf32Vqf32(Q6_V_lo_W(xy_qf), Q6_V_hi_W(xy_qf)));
    }

    // Convert into fp32 and reduce
    rsum = hvx_vec_reduce_sum_f32(Q6_Vsf_equals_Vqf32(rsum));
    hvx_vec_store_u(&s[0], 4, rsum);
}
|
|
1535
|
+
|
|
1536
|
+
// Expands to local declarations for the tensor geometry of a matmul op.
// Requires `octx` (struct htp_ops_context *) in scope. Declares:
//   src0/src1/src2/dst          - tensor pointers
//   src0_spad/src1_spad/dst_spad - VTCM scratchpad descriptors
//   neXY / nbXY                 - dims and byte strides (ggml naming: ne0x=src0,
//                                 ne1x=src1, ne2x=src2, neX/nbX=dst)
// Not every user of this macro reads all of these; unused-variable warnings
// are expected for some of them.
#define htp_matmul_tensors_preamble \
    struct htp_tensor * restrict src0 = &octx->src0; \
    struct htp_tensor * restrict src1 = &octx->src1; \
    struct htp_tensor * restrict src2 = &octx->src2; \
    struct htp_tensor * restrict dst = &octx->dst; \
    struct htp_spad * restrict src0_spad = &octx->src0_spad; \
    struct htp_spad * restrict src1_spad = &octx->src1_spad; \
    struct htp_spad * restrict dst_spad = &octx->dst_spad; \
    \
    const uint32_t ne00 = src0->ne[0]; \
    const uint32_t ne01 = src0->ne[1]; \
    const uint32_t ne02 = src0->ne[2]; \
    const uint32_t ne03 = src0->ne[3]; \
    \
    const uint32_t ne10 = src1->ne[0]; \
    const uint32_t ne11 = src1->ne[1]; \
    const uint32_t ne12 = src1->ne[2]; \
    const uint32_t ne13 = src1->ne[3]; \
    \
    const uint32_t ne20 = src2->ne[0]; \
    const uint32_t ne21 = src2->ne[1]; \
    const uint32_t ne22 = src2->ne[2]; \
    const uint32_t ne23 = src2->ne[3]; \
    \
    const uint32_t ne0 = dst->ne[0]; \
    const uint32_t ne1 = dst->ne[1]; \
    const uint32_t ne2 = dst->ne[2]; \
    const uint32_t ne3 = dst->ne[3]; \
    \
    const uint32_t nb00 = src0->nb[0]; \
    const uint32_t nb01 = src0->nb[1]; \
    const uint32_t nb02 = src0->nb[2]; \
    const uint32_t nb03 = src0->nb[3]; \
    \
    const uint32_t nb10 = src1->nb[0]; \
    const uint32_t nb11 = src1->nb[1]; \
    const uint32_t nb12 = src1->nb[2]; \
    const uint32_t nb13 = src1->nb[3]; \
    \
    const uint32_t nb0 = dst->nb[0]; \
    const uint32_t nb1 = dst->nb[1]; \
    const uint32_t nb2 = dst->nb[2]; \
    const uint32_t nb3 = dst->nb[3];
|
|
1579
|
+
|
|
1580
|
+
// Standard entry preamble for matmul worker functions.
// Requires `data` (the worker argument, a struct htp_matmul_context *) and
// `ith` (this worker's thread index) in scope. Declares `mmctx`, `octx`,
// everything from htp_matmul_tensors_preamble, this thread's DMA queue, and
// the per-thread src0 row quota.
#define htp_matmul_preamble \
    struct htp_matmul_context * mmctx = data; \
    struct htp_ops_context * octx = mmctx->octx; \
    htp_matmul_tensors_preamble; \
    dma_queue *dma_queue = octx->ctx->dma[ith]; \
    uint32_t src0_nrows_per_thread = mmctx->src0_nrows_per_thread;
|
|
1586
|
+
|
|
1587
|
+
// *** matmul with support for 4d tensors and full broadcasting
|
|
1588
|
+
|
|
1589
|
+
// General matmul over 4D tensors with full src0-into-src1 broadcasting
// (ne12 must be a multiple of ne02, ne13 of ne03 -- asserted below).
// Work is split across `nth` threads along whichever result dimension is
// larger; each thread computes its [ir0_start, ir0_end) x [ir1_start, ir1_end)
// slice using the 1x1 dot-product kernel selected in mmctx.
static void matmul_4d(unsigned int nth, unsigned int ith, void * data) {
    htp_matmul_preamble;

    uint64_t t1, t2;
    t1 = HAP_perf_get_qtimer_count();

    assert(ne12 % ne02 == 0);
    assert(ne13 % ne03 == 0);

    // This is the size of the first dimension of the result, so we can iterate that way. (see the ASSERT above, these are the same numbers)
    const uint32_t nr0 = ne0;

    // This is the size of the rest of the dimensions of the result
    const uint32_t nr1 = ne1 * ne2 * ne3;

    // distribute the thread work across the inner or outer loop based on which one is larger
    uint32_t nchunk0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
    uint32_t nchunk1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows

    // The number of elements in each chunk
    const uint32_t dr0 = (nr0 + nchunk0 - 1) / nchunk0;
    const uint32_t dr1 = (nr1 + nchunk1 - 1) / nchunk1;

    uint32_t current_chunk = ith;

    // Map the flat chunk id onto the (chunk0, chunk1) grid
    const uint32_t ith0 = current_chunk % nchunk0;
    const uint32_t ith1 = current_chunk / nchunk0;

    const uint32_t ir0_start = dr0 * ith0;
    const uint32_t ir0_end = MIN(ir0_start + dr0, nr0);

    const uint32_t ir1_start = dr1 * ith1;
    const uint32_t ir1_end = MIN(ir1_start + dr1, nr1);

    // no work for this thread
    if (ir0_start >= ir0_end || ir1_start >= ir1_end) {
        return;
    }

    // block-tiling attempt
    const uint32_t blck_0 = 64;
    const uint32_t blck_1 = 64;

    for (uint32_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) {
        for (uint32_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) {
            for (uint32_t ir1 = iir1; ir1 < MIN(iir1 + blck_1, ir1_end); ir1++) {
                // Unflatten ir1 into (i11, i12, i13) using precomputed fastdiv
                // magic numbers (avoids hardware division in the hot loop)
                const uint32_t i13 = fastdiv(ir1, &mmctx->mm_div_ne12_ne1);
                const uint32_t i12 = fastdiv(ir1 - i13 * ne12 * ne1, &mmctx->mm_div_ne1);
                const uint32_t i11 = (ir1 - i13 * ne12 * ne1 - i12 * ne1);

                // broadcast src0 into src1
                const uint32_t i03 = fastdiv(i13, &mmctx->mm_div_r3);
                const uint32_t i02 = fastdiv(i12, &mmctx->mm_div_r2);

                const uint32_t i1 = i11;
                const uint32_t i2 = i12;
                const uint32_t i3 = i13;

                const uint8_t * restrict src0_base = (const uint8_t *) src0->data + (0 + i02 * nb02 + i03 * nb03);
                const uint8_t * restrict src1_col = (const uint8_t *) src1->data + (i11 * nb11 + i12 * nb12 + i13 * nb13);
                float * dst_col = (float *) ((uint8_t * restrict) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3));

                const uint32_t ir0_block_end = MIN(iir0 + blck_0, ir0_end);
                for (uint32_t ir0 = iir0; ir0 < ir0_block_end; ir0++) {
                    const uint8_t * restrict src0_row = src0_base + ir0 * nb01;
                    mmctx->vec_dot_1x1(ne00, &dst_col[ir0], src0_row, src1_col);
                }
            }
        }
    }

    t2 = HAP_perf_get_qtimer_count();

    FARF(HIGH, "matmul-4d %d/%d: %ux%ux%ux%u (%u:%u %u:%u) * %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", ith, nth,
        src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], ir0_start, ir0_end, ir1_start, ir1_end, src1->ne[0],
        src1->ne[1], src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3],
        (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1));
}
|
|
1667
|
+
|
|
1668
|
+
// src1 tensor is already in VTCM spad
|
|
1669
|
+
// 2D matmul worker: src1 already resides in VTCM; src0 rows are streamed from
// DDR into a per-thread VTCM scratchpad via a DMA queue, two rows at a time,
// software-pipelined (prefill, then pop/compute/push-next). Rows are processed
// in pairs with 2x2 / 2x1 dot kernels; an odd trailing row falls back to 1x1.
static void matmul_2d(unsigned int nth, unsigned int ith, void * data) {
    htp_matmul_preamble;

    const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows
    const uint32_t src1_nrows = ne11 * ne12 * ne13; // src1 rows

    // This thread's slice of src0 rows; _x2 is the largest even-sized prefix
    const uint32_t src0_start_row = src0_nrows_per_thread * ith;
    const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows);
    const uint32_t src0_end_row_x2 = src0_start_row + ((src0_end_row - src0_start_row) & ~1U);

    // no work for this thread
    if (src0_start_row >= src0_end_row) {
        return;
    }

    const size_t dst_row_size = nb1;
    const size_t src0_row_size = nb01;
    const size_t src1_row_size = nb11; // NOTE(review): unused in this function

    const size_t src0_stride = src0_spad->stride;
    const size_t src1_stride = src1_spad->stride;

    // Per-thread VTCM scratchpads for all tensors
    // Note that the entire src1 tensor is already in VTCM
    // For other tensors we allocate N rows per thread, padded to HVX vector size
    uint8_t * restrict spad_dst = dst_spad->data + dst_spad->size_per_thread * ith; // NOTE(review): unused here
    uint8_t * restrict spad_src0 = src0_spad->data + src0_spad->size_per_thread * ith;
    uint8_t * restrict src1_data = src1_spad->data;

    volatile uint64_t t1, t2;
    t1 = HAP_perf_get_qtimer_count();

    const uint8_t * restrict src0_row = (const uint8_t *) src0->data;

    // Prefill spad with src0 rows (up to MM_SPAD_SRC0_NROWS, 2 rows per DMA)
#pragma unroll(4)
    for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) {
        const int is0 = (ir0 - src0_start_row);
        if (is0 >= MM_SPAD_SRC0_NROWS) {
            break;
        }
        dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_stride, src0_row + ir0 * src0_row_size),
                src0_stride, src0_row_size, 2);
    }

    // Process src0 rows
    for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) {
        // Wait for the next pair of src0 rows to land in VTCM
        const uint8_t * ss0 = dma_queue_pop(dma_queue).dst;

        // Process src1 columns in pairs (2×2 tiling)
        uint32_t ir1 = 0;
        for (; ir1 + 1 < src1_nrows; ir1 += 2) {
            const uint8_t * restrict src1_col0 = (const uint8_t *) (src1_data + (ir1+0) * src1_stride);
            const uint8_t * restrict src1_col1 = (const uint8_t *) (src1_data + (ir1+1) * src1_stride);
            float * restrict dst_row0 = (float *) (dst->data + ((ir1+0) * dst_row_size));
            float * restrict dst_row1 = (float *) (dst->data + ((ir1+1) * dst_row_size));
            mmctx->vec_dot_2x2(ne00, &dst_row0[ir0], &dst_row1[ir0], ss0, ss0 + src0_stride, src1_col0, src1_col1);
        }

        // Handle remaining src1 rows (fallback to 2×1)
        for (; ir1 < src1_nrows; ++ir1) {
            const uint8_t * restrict src1_col = (const uint8_t *) (src1_data + ir1 * src1_stride);
            float * restrict dst_row = (float *) (dst->data + (ir1 * dst_row_size));
            mmctx->vec_dot_2x1(ne00, &dst_row[ir0], ss0, ss0 + src0_stride, src1_col);
        }

        // Prefetch next (n + spad_nrows) row
        // (spad slots are reused as a ring: index wraps modulo MM_SPAD_SRC0_NROWS)
        const int pr0 = (ir0 + MM_SPAD_SRC0_NROWS);
        const int is0 = (pr0 - src0_start_row) % MM_SPAD_SRC0_NROWS;
        if (pr0 < src0_end_row_x2) {
            dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_stride, src0_row + pr0 * src0_row_size),
                    src0_stride, src0_row_size, 2);
        }
    }

    // Process the last row (if any)
    if (src0_end_row != src0_end_row_x2) {
        uint32_t ir0 = src0_end_row_x2;
        const int is0 = (ir0 - src0_start_row);
        dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_stride, src0_row + ir0 * src0_row_size),
                src0_stride, src0_row_size, 1);
        const uint8_t * ss0 = dma_queue_pop(dma_queue).dst;

#pragma unroll(2)
        for (uint32_t ir1 = 0; ir1 < src1_nrows; ++ir1) {
            const uint8_t * restrict src1_col = (const uint8_t *) (src1_data + ir1 * src1_stride);
            float * restrict dst_row = (float *) (dst->data + (ir1 * dst_row_size));
            mmctx->vec_dot_1x1(ne00, &dst_row[ir0], ss0, src1_col);
        }
    }

    t2 = HAP_perf_get_qtimer_count();

    FARF(HIGH, "matmul-%s %d/%d: %ux%ux%ux%u (%u:%u) * %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", mmctx->type, ith, nth,
        src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0], src1->ne[1],
        src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3],
        (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1));
}
|
|
1767
|
+
|
|
1768
|
+
// q8x4x2 src1 tensor is already in VTCM spad
|
|
1769
|
+
// Matrix-vector worker: src1 (single column) already resides in VTCM. src0
// rows are DMA-streamed from DDR into a per-thread VTCM scratchpad, two at a
// time, software-pipelined. Results accumulate into a per-thread VTCM `tmp`
// buffer and are copied to dst in one pass at the end.
static void matvec_2d(unsigned int nth, unsigned int ith, void * data) {
    htp_matmul_preamble;

    const uint32_t src0_nrows = ne01;

    // This thread's slice of src0 rows; _x2 is the largest even-sized prefix
    const uint32_t src0_start_row = src0_nrows_per_thread * ith;
    const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows);
    const uint32_t src0_end_row_x2 = src0_start_row + ((src0_end_row - src0_start_row) & ~1U);

    // no work for this thread
    if (src0_start_row >= src0_end_row) {
        return;
    }

    const size_t dst_row_size = nb1;   // NOTE(review): unused in this function
    const size_t src0_row_size = nb01;
    const size_t src1_row_size = nb11; // NOTE(review): unused in this function

    const size_t src0_stride = src0_spad->stride;
    const size_t src1_stride = src1_spad->stride; // NOTE(review): unused in this function

    // Per-thread VTCM scratchpads for all tensors
    // Note that the entire src1 tensor is already in VTCM
    // For other tensors we allocate N rows per thread, padded to HVX vector size
    uint8_t * spad_dst = dst_spad->data + dst_spad->size_per_thread * ith;
    uint8_t * spad_src0 = src0_spad->data + src0_spad->size_per_thread * ith;
    uint8_t * src1_data = src1_spad->data;

    uint64_t t1, t2;
    t1 = HAP_perf_get_qtimer_count();

    // Per-thread staging buffer for the output scalars
    float * tmp = (float *) spad_dst;

    const uint8_t * restrict src0_row = (const uint8_t *) src0->data;
    const uint8_t * restrict src1_col = (const uint8_t *) src1_data;
    float * restrict dst_col = (float *) dst->data;

    // Prefill spad with 2x src0 rows
#pragma unroll(2)
    for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) {
        const uint32_t is0 = (ir0 - src0_start_row);
        if (is0 >= MM_SPAD_SRC0_NROWS) {
            break;
        }
        dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_stride, src0_row + ir0 * src0_row_size),
                src0_stride, src0_row_size, 2);
    }

    // Process src0 rows
    for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) {
        // Wait for the next pair of rows, compute both dot products at once
        const uint8_t * ss0 = dma_queue_pop(dma_queue).dst;
        mmctx->vec_dot_2x1(ne00, &tmp[ir0 - src0_start_row], ss0, ss0 + src0_stride, src1_col);

        // Prefetch next (n + spad_nrows) row
        // (spad slots are reused as a ring: index wraps modulo MM_SPAD_SRC0_NROWS)
        const uint32_t pr0 = (ir0 + MM_SPAD_SRC0_NROWS);
        const uint32_t is0 = (pr0 - src0_start_row) % MM_SPAD_SRC0_NROWS;
        if (pr0 < src0_end_row_x2) {
            dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_stride, src0_row + pr0 * src0_row_size),
                    src0_stride, src0_row_size, 2);
        }
    }

    // Process the last row (if any)
    if (src0_end_row != src0_end_row_x2) {
        const uint32_t ir0 = src0_end_row_x2;
        const uint32_t is0 = (ir0 - src0_start_row);
        dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_stride, src0_row + ir0 * src0_row_size),
                src0_stride, src0_row_size, 1);
        const uint8_t * ss0 = dma_queue_pop(dma_queue).dst;
        mmctx->vec_dot_1x1(ne00, &tmp[ir0 - src0_start_row], ss0, src1_col);
    }

    // Copy this thread's staged results into the destination column
    hvx_copy_f32_ua((uint8_t *) &dst_col[src0_start_row], (uint8_t *) tmp, src0_end_row - src0_start_row);

    t2 = HAP_perf_get_qtimer_count();

    FARF(HIGH, "matvec-%s %u/%u: %ux%ux%ux%u (%u:%u) * %ux%ux%ux%u -> %ux%ux%ux%u usec %u\n", mmctx->type, ith, nth,
        src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0], src1->ne[1],
        src1->ne[2], src1->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3],
        (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1));
}
|
|
1850
|
+
|
|
1851
|
+
// Look up the row-mapping entry for slot (i1) of expert (row_id) in the
// flattened matrix_rows table; each expert owns ids->ne[0] * ids->ne[1] slots.
// Relies on `matrix_rows` and `ids` being in scope at the expansion site.
#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id) * ids->ne[0] * ids->ne[1] + (i1)]

// One entry of the expert -> (src1 row, dst row) mapping used by the MoE
// (mul_mat_id) kernels below.
struct mmid_row_mapping {
    uint32_t i1;
    uint32_t i2;
};
|
|
1857
|
+
|
|
1858
|
+
// src1 tensor is already in VTCM spad
//
// Mixture-of-experts matmul (mul_mat_id): src0 holds one weight matrix per
// expert (stacked along dim 2), src1 holds the pre-quantized activations
// (already resident in VTCM), and the src2 spad holds the per-expert row
// counts plus row mappings built during op setup. Each thread processes its
// slice of src0 rows for every expert, streaming src0 rows from DDR into a
// per-thread VTCM scratchpad via the DMA queue (rows are processed in pairs,
// with a prefetch MM_SPAD_SRC0_NROWS rows ahead).
static void matmul_id(unsigned int nth, unsigned int ith, void * data) {
    htp_matmul_preamble;  // brings octx, src0/src1/dst, ne*/nb*, spads, dma_queue, etc. into scope

    struct htp_tensor * restrict ids = &octx->src2;
    struct htp_spad * restrict src2_spad = &octx->src2_spad;

    uint64_t t1, t2;
    t1 = HAP_perf_get_qtimer_count();

    const uint32_t src0_nrows = ne01; // src0 rows per expert
    const uint32_t src1_nrows = ne11;

    // This thread's slice of src0 rows; *_x2 is the largest even-length prefix
    // so the main loop can consume rows two at a time with vec_dot_2x1.
    const uint32_t src0_start_row = src0_nrows_per_thread * ith;
    const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows);
    const uint32_t src0_end_row_x2 = src0_start_row + ((src0_end_row - src0_start_row) & ~1U);

    // no work for this thread
    if (src0_start_row >= src0_end_row) {
        return;
    }

    const uint32_t n_ids = ids->ne[0]; // n_expert_used
    const uint32_t n_as = ne02;        // n_expert

    // src2 spad layout: per-expert row counts first, then the row mappings.
    // (matrix_row_map_size is not read below; kept as layout documentation.)
    const size_t matrix_row_counts_size = n_as * sizeof(uint32_t);
    const size_t matrix_row_map_size = n_as * ids->ne[0] * ids->ne[1] * sizeof(struct mmid_row_mapping);

    const uint32_t * matrix_row_counts = (const uint32_t *) src2_spad->data + 0;
    const struct mmid_row_mapping * matrix_rows = (const void *) src2_spad->data + matrix_row_counts_size;

    const size_t dst_row_size = nb1;
    const size_t src0_row_size = nb01;
    const size_t src1_row_size = q8x4x2_row_size(ne10);

    // src0 rows are DMA'd into VTCM at a 128-byte padded stride so that each
    // row starts HVX-aligned.
    const size_t src0_row_size_padded = hex_round_up(src0_row_size, 128);

    // Per-thread VTCM scratchpads for all tensors
    // Note that the entire src1 tensor is already in VTCM
    // For other tensors we allocate N rows per thread, padded to HVX vector size
    uint8_t * restrict spad_dst = dst_spad->data + dst_spad->size_per_thread * ith;
    uint8_t * restrict spad_src0 = src0_spad->data + src0_spad->size_per_thread * ith;
    uint8_t * restrict src1_data = src1_spad->data;

    for (uint32_t cur_a = 0; cur_a < n_as; ++cur_a) {
        // Number of tokens routed to this expert; skip inactive experts.
        const int32_t cne1 = matrix_row_counts[cur_a];

        if (cne1 == 0) {
            continue;
        }

        // Base of this expert's weight matrix in DDR.
        const uint8_t * src0_row = (const uint8_t *) src0->data + (0 + cur_a * nb02 + 0);

        // Prefill spad with src0 rows
#pragma unroll(4)
        for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) {
            const int is0 = (ir0 - src0_start_row);
            if (is0 >= MM_SPAD_SRC0_NROWS) {
                break;
            }
            dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size),
                                       src0_row_size_padded, src0_row_size, 2);
        }

        // Process src0 rows
        for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) {
            // Wait for the DMA'd pair of rows (pop order matches push order).
            const uint8_t * ss0 = dma_queue_pop(dma_queue).dst;

            // Apply this row pair to every token routed to this expert.
            for (uint32_t cid = 0; cid < cne1; ++cid) {
                struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, cid);
                const int rm1 = row_mapping.i1; // expert idx
                const int rm2 = row_mapping.i2; // token idx

                const uint32_t ir1 = src1_nrows == 1 ? 0 : rm1; // src1 row idx
                const uint8_t * restrict src1_col = (const uint8_t *) (src1_data + (ir1 + rm2 * ne11 + 0) * src1_row_size);
                float * dst_row = (float *) (dst->data + (rm1 * nb1 + rm2 * nb2 + 0));

                mmctx->vec_dot_2x1(ne00, &dst_row[ir0], ss0, ss0 + src0_row_size_padded, src1_col);
            }

            // Prefetch next (n + spad_nrows) row
            const int pr0 = (ir0 + MM_SPAD_SRC0_NROWS);
            // Scratchpad slot reuse: the spad is a ring of MM_SPAD_SRC0_NROWS rows.
            const int is0 = (pr0 - src0_start_row) % MM_SPAD_SRC0_NROWS;
            if (pr0 < src0_end_row_x2) {
                dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + pr0 * src0_row_size),
                                           src0_row_size_padded, src0_row_size, 2);
            }
        }

        // Process the last row (if any)
        if (src0_end_row != src0_end_row_x2) {
            uint32_t ir0 = src0_end_row_x2;
            const uint32_t is0 = (ir0 - src0_start_row);
            dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size),
                                       src0_row_size_padded, src0_row_size, 1);
            const uint8_t * ss0 = dma_queue_pop(dma_queue).dst;

            for (uint32_t cid = 0; cid < cne1; ++cid) {
                struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, cid);
                const int rm1 = row_mapping.i1; // expert idx
                const int rm2 = row_mapping.i2; // token idx

                const uint32_t ir1 = src1_nrows == 1 ? 0 : rm1; // src1 row idx
                const uint8_t * restrict src1_col = (const uint8_t *) (src1_data + (ir1 + rm2 * ne11 + 0) * src1_row_size);
                float * dst_row = (float *) (dst->data + (rm1 * nb1 + rm2 * nb2 + 0));

                mmctx->vec_dot_1x1(ne00, &dst_row[ir0], ss0, src1_col);
            }
        }
    }

    t2 = HAP_perf_get_qtimer_count();

    FARF(HIGH, "matmul-id-%s %d/%d: %ux%ux%ux%u (%u:%u) * %ux%ux%ux%u (%ux%ux%ux%u) -> %ux%ux%ux%u usec %u\n", mmctx->type,
         ith, nth, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0],
         src1->ne[1], src1->ne[2], src1->ne[3], ids->ne[0], ids->ne[1], ids->ne[2], ids->ne[3], dst->ne[0], dst->ne[1],
         dst->ne[2], dst->ne[3], (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1));
}
|
|
1976
|
+
|
|
1977
|
+
// src1 tensor is already in VTCM spad
//
// Mixture-of-experts matrix-vector product (single-token mul_mat_id): src2
// lists the activated expert ids; for each activated expert this multiplies
// the expert's weight matrix by the single (VTCM-resident, pre-quantized)
// src1 column. Threads split the src0 rows; rows are double-buffered through
// a per-thread VTCM scratchpad via DMA, processed two at a time.
static void matvec_id(unsigned int nth, unsigned int ith, void * data) {
    htp_matmul_preamble;  // brings octx, src0/src1/src2/dst, ne*/nb*, spads, dma_queue, etc. into scope

    // NOTE(review): `ids`/`src2_spad` are declared here but `src2` is accessed
    // directly below; they appear unused in this function.
    struct htp_tensor * restrict ids = &octx->src2;
    struct htp_spad * restrict src2_spad = &octx->src2_spad;

    uint64_t t1, t2;
    t1 = HAP_perf_get_qtimer_count();

    const uint32_t src0_nrows = ne01; // src0 rows per expert

    // This thread's slice of src0 rows; *_x2 is the largest even-length prefix
    // so the main loop can consume rows two at a time with vec_dot_2x1.
    const uint32_t src0_start_row = src0_nrows_per_thread * ith;
    const uint32_t src0_end_row = MIN(src0_start_row + src0_nrows_per_thread, src0_nrows);
    const uint32_t src0_end_row_x2 = src0_start_row + ((src0_end_row - src0_start_row) & ~1U);

    // no work for this thread
    if (src0_start_row >= src0_end_row) {
        return;
    }

    assert(ne13 % ne03 == 0);

    const size_t dst_row_size = nb1;
    const size_t src0_row_size = nb01;
    const size_t src1_row_size = q8x4x2_row_size(ne10);

    // src0 rows are DMA'd into VTCM at a 128-byte padded stride so that each
    // row starts HVX-aligned.
    const size_t src0_row_size_padded = hex_round_up(src0_row_size, 128);

    const uint32_t n_aids = src2->ne[0]; // num activated experts
    const uint32_t n_ids = ne02;         // num experts

    // Per-thread VTCM scratchpads for all tensors
    // Note that the entire src1 tensor is already in VTCM
    // For other tensors we allocate N rows per thread, padded to HVX vector size
    uint8_t * restrict spad_dst = dst_spad->data + dst_spad->size_per_thread * ith;
    uint8_t * restrict spad_src0 = src0_spad->data + src0_spad->size_per_thread * ith;
    uint8_t * restrict src1_data = src1_spad->data;

    for (uint32_t ie1 = 0; ie1 < n_aids; ++ie1) { // for each expert
        // Activated expert id for output row ie1.
        const uint32_t eid = *(const int32_t *) ((const uint8_t *) src2->data + ie1 * src2->nb[0]);
        assert(eid < n_ids);

        const uint8_t * restrict src0_row = (const uint8_t *) src0->data + eid * nb02;
        const uint8_t * restrict src1_col = (const uint8_t *) src1_data;
        float * restrict dst_row = (float *) (dst->data + ie1 * nb1);

        // Prefill spad with src0 rows
#pragma unroll(4)
        for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) {
            const int is0 = (ir0 - src0_start_row);
            if (is0 >= MM_SPAD_SRC0_NROWS) {
                break;
            }
            dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size),
                                       src0_row_size_padded, src0_row_size, 2);
        }

        // Process src0 rows
        for (uint32_t ir0 = src0_start_row; ir0 < src0_end_row_x2; ir0 += 2) {
            // Wait for the DMA'd pair of rows (pop order matches push order).
            const uint8_t * ss0 = dma_queue_pop(dma_queue).dst;
            mmctx->vec_dot_2x1(ne00, &dst_row[ir0], ss0, ss0 + src0_row_size_padded, src1_col);

            // Prefetch next (n + spad_nrows) row
            const int pr0 = (ir0 + MM_SPAD_SRC0_NROWS);
            // Scratchpad slot reuse: the spad is a ring of MM_SPAD_SRC0_NROWS rows.
            const int is0 = (pr0 - src0_start_row) % MM_SPAD_SRC0_NROWS;
            if (pr0 < src0_end_row_x2) {
                dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + pr0 * src0_row_size),
                                           src0_row_size_padded, src0_row_size, 2);
            }
        }

        // Process the last row (if any)
        if (src0_end_row != src0_end_row_x2) {
            uint32_t ir0 = src0_end_row_x2;
            const uint32_t is0 = (ir0 - src0_start_row);
            dma_queue_push_ddr_to_vtcm(dma_queue, dma_make_ptr(spad_src0 + is0 * src0_row_size_padded, src0_row + ir0 * src0_row_size),
                                       src0_row_size_padded, src0_row_size, 1);
            const uint8_t * ss0 = dma_queue_pop(dma_queue).dst;
            mmctx->vec_dot_1x1(ne00, &dst_row[ir0], ss0, src1_col);
        }
    }

    t2 = HAP_perf_get_qtimer_count();

    FARF(HIGH, "matvec-id-%s %d/%d: %ux%ux%ux%u (%u:%u) * %ux%ux%ux%u (%ux%ux%ux%u) -> %ux%ux%ux%u usec %u\n", mmctx->type,
         ith, nth, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0_start_row, src0_end_row, src1->ne[0],
         src1->ne[1], src1->ne[2], src1->ne[3], src2->ne[0], src2->ne[1], src2->ne[2], src2->ne[3], dst->ne[0],
         dst->ne[1], dst->ne[2], dst->ne[3], (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1));
}
|
|
2067
|
+
|
|
2068
|
+
// *** dynamic quant
|
|
2069
|
+
|
|
2070
|
+
// Quantize one block of 128 floats (4 HVX vectors) to int8 with a separate
// fp16 scale per 32-element group (group size 32 variant).
// Each group's scale is max(|x|) * (1/127); values are divided by the scale,
// rounded/saturated to int8, and the 128 quants are stored as one HVX vector
// at y_q. The four fp16 scales are stored at y_d + 0/2/4/6.
// x and y_q must be 128-byte aligned (VTCM temp buffers); y_d stores are unaligned.
static inline void quantize_block_f32_q8x1(float * restrict x, uint8_t * restrict y_q, uint8_t * restrict y_d) {
    assert((unsigned long) x % 128 == 0);
    assert((unsigned long) y_q % 128 == 0);

    HVX_Vector * vx = (HVX_Vector *) x;
    HVX_Vector zero = Q6_V_vzero();

    // Use reduce max fp32 to find max(abs(e)) first
    HVX_Vector vmax0_sf = hvx_vec_reduce_max_f32(hvx_vec_abs_f32(vx[0]));
    HVX_Vector vmax1_sf = hvx_vec_reduce_max_f32(hvx_vec_abs_f32(vx[1]));
    HVX_Vector vmax2_sf = hvx_vec_reduce_max_f32(hvx_vec_abs_f32(vx[2]));
    HVX_Vector vmax3_sf = hvx_vec_reduce_max_f32(hvx_vec_abs_f32(vx[3]));
    // Load and convert into QF32 (subtracting zero is the idiomatic sf->qf32 conversion)
    HVX_Vector vx0_qf = Q6_Vqf32_vsub_VsfVsf(vx[0], zero); // 32 elements
    HVX_Vector vx1_qf = Q6_Vqf32_vsub_VsfVsf(vx[1], zero); // 32 elements
    HVX_Vector vx2_qf = Q6_Vqf32_vsub_VsfVsf(vx[2], zero); // 32 elements
    HVX_Vector vx3_qf = Q6_Vqf32_vsub_VsfVsf(vx[3], zero); // 32 elements

    // Convert to QF32
    HVX_Vector vmax0_qf = Q6_Vqf32_vsub_VsfVsf(vmax0_sf, zero); // replicated over all lanes
    HVX_Vector vmax1_qf = Q6_Vqf32_vsub_VsfVsf(vmax1_sf, zero); // replicated over all lanes
    HVX_Vector vmax2_qf = Q6_Vqf32_vsub_VsfVsf(vmax2_sf, zero); // replicated over all lanes
    HVX_Vector vmax3_qf = Q6_Vqf32_vsub_VsfVsf(vmax3_sf, zero); // replicated over all lanes

    // Combine and convert to fp16 (vdeal puts the two source vectors' lanes in order)
    HVX_Vector vmax01_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vmax1_qf, vmax0_qf)));
    HVX_Vector vmax23_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vmax3_qf, vmax2_qf)));

    // Convert into fp16
    HVX_Vector vx01_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vx1_qf, vx0_qf)));
    HVX_Vector vx23_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vx3_qf, vx2_qf)));

    // Scale d = max * (1/127); 0x2008 is 1.0/127.0 in fp16
    HVX_Vector vd01_qf16 = Q6_Vqf16_vmpy_VhfVhf(vmax01_hf, Q6_Vh_vsplat_R(0x2008)); // 1.0 / 127.0
    HVX_Vector vd23_qf16 = Q6_Vqf16_vmpy_VhfVhf(vmax23_hf, Q6_Vh_vsplat_R(0x2008)); // 1.0 / 127.0
    HVX_Vector vd01_hf = Q6_Vhf_equals_Vqf16(vd01_qf16);
    HVX_Vector vd23_hf = Q6_Vhf_equals_Vqf16(vd23_qf16);

    // Store the four 2-byte scales; vror by 64 bytes brings the lane holding
    // the second group's scale to the front of the vector before the store.
    hvx_vec_store_u(y_d + 0, 2, vd01_hf);
    HVX_Vector rotated_vd_hf = Q6_V_vror_VR(vd01_hf, 64);
    hvx_vec_store_u(y_d + 2, 2, rotated_vd_hf);

    hvx_vec_store_u(y_d + 4, 2, vd23_hf);
    rotated_vd_hf = Q6_V_vror_VR(vd23_hf, 64);
    hvx_vec_store_u(y_d + 6, 2, rotated_vd_hf);

    // Divide input by the scale (multiply by its reciprocal)
    HVX_Vector vd01_inv_hf = hvx_vec_inverse_f16(vd01_hf);
    HVX_Vector vd23_inv_hf = hvx_vec_inverse_f16(vd23_hf);
    vx01_hf = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(vx01_hf, vd01_inv_hf));
    vx23_hf = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(vx23_hf, vd23_inv_hf));

    // Convert to int8 (round + saturate)
    HVX_Vector vx01_i16 = hvx_vec_i16_from_hf_rnd_sat(vx01_hf);
    HVX_Vector vx23_i16 = hvx_vec_i16_from_hf_rnd_sat(vx23_hf);
    HVX_Vector vx_i8 = Q6_Vb_vpack_VhVh_sat(vx23_i16, vx01_i16);

    *(HVX_Vector *) y_q = vx_i8;
}
|
|
2128
|
+
|
|
2129
|
+
// Quantize one block of 128 floats (4 HVX vectors) to int8 with a separate
// fp16 scale per 64-element group (group size 64 variant).
// Unlike the q8x1 variant, max(|x|) is computed after the fp16 conversion,
// once per 64-element half. Scales are stored at y_d + 0 and y_d + 4.
// x and y_q must be 128-byte aligned; y_d stores are unaligned.
static inline void quantize_block_f32_q8x2(float * restrict x, uint8_t * restrict y_q, uint8_t * restrict y_d) {
    assert((unsigned long) x % 128 == 0);
    assert((unsigned long) y_q % 128 == 0);

    HVX_Vector * vx = (HVX_Vector *) x;

    // Load and convert into QF32 (subtracting zero is the idiomatic sf->qf32 conversion)
    HVX_Vector zero = Q6_V_vzero();
    HVX_Vector vx0_qf = Q6_Vqf32_vsub_VsfVsf(vx[0], zero); // 32 elements
    HVX_Vector vx1_qf = Q6_Vqf32_vsub_VsfVsf(vx[1], zero); // 32 elements
    HVX_Vector vx2_qf = Q6_Vqf32_vsub_VsfVsf(vx[2], zero); // 32 elements
    HVX_Vector vx3_qf = Q6_Vqf32_vsub_VsfVsf(vx[3], zero); // 32 elements

    // Convert into fp16
    HVX_Vector vx01_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vx1_qf, vx0_qf)));
    HVX_Vector vx23_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vx3_qf, vx2_qf)));

    // Compute max and scale
    HVX_Vector vmax01_hf = hvx_vec_reduce_max_f16(hvx_vec_abs_f16(vx01_hf)); // replicated over all lanes
    HVX_Vector vmax23_hf = hvx_vec_reduce_max_f16(hvx_vec_abs_f16(vx23_hf)); // replicated over all lanes

    // Scale d = max * (1/127); 0x2008 is 1.0/127.0 in fp16
    HVX_Vector vd01_qf16 = Q6_Vqf16_vmpy_VhfVhf(vmax01_hf, Q6_Vh_vsplat_R(0x2008)); // 1.0 / 127.0
    HVX_Vector vd23_qf16 = Q6_Vqf16_vmpy_VhfVhf(vmax23_hf, Q6_Vh_vsplat_R(0x2008)); // 1.0 / 127.0
    HVX_Vector vd01_hf = Q6_Vhf_equals_Vqf16(vd01_qf16);
    HVX_Vector vd23_hf = Q6_Vhf_equals_Vqf16(vd23_qf16);

    // Each store writes one replicated scale as 2x fp16 (4 bytes)
    hvx_vec_store_u(y_d + 0, 4, vd01_hf);
    hvx_vec_store_u(y_d + 4, 4, vd23_hf);

    // Divide input by the scale (multiply by its reciprocal)
    HVX_Vector vd01_inv_hf = hvx_vec_inverse_f16(vd01_hf);
    HVX_Vector vd23_inv_hf = hvx_vec_inverse_f16(vd23_hf);
    vx01_hf = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(vx01_hf, vd01_inv_hf));
    vx23_hf = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(vx23_hf, vd23_inv_hf));

    // Convert to int8 (round + saturate)
    HVX_Vector vx01_i16 = hvx_vec_i16_from_hf_rnd_sat(vx01_hf);
    HVX_Vector vx23_i16 = hvx_vec_i16_from_hf_rnd_sat(vx23_hf);
    HVX_Vector vx_i8 = Q6_Vb_vpack_VhVh_sat(vx23_i16, vx01_i16);

    *(HVX_Vector *) y_q = vx_i8;
}
|
|
2171
|
+
|
|
2172
|
+
// Quantize one block of 128 floats (4 HVX vectors) to int8 with a single
// fp16 scale for the whole block (group size 128 variant).
// The scale max(|x|)/127 is computed over all 128 elements and stored
// replicated across the full vector at y_d (unaligned store).
// x and y_q must be 128-byte aligned.
static inline void quantize_block_f32_q8x4(float * restrict x, uint8_t * restrict y_q, uint8_t * restrict y_d) {
    assert((unsigned long) x % 128 == 0);
    assert((unsigned long) y_q % 128 == 0);

    HVX_Vector * vx = (HVX_Vector *) x;

    // Load and convert into QF32 (subtracting zero is the idiomatic sf->qf32 conversion)
    HVX_Vector zero = Q6_V_vzero();
    HVX_Vector vx0_qf = Q6_Vqf32_vsub_VsfVsf(vx[0], zero); // 32 elements
    HVX_Vector vx1_qf = Q6_Vqf32_vsub_VsfVsf(vx[1], zero); // 32 elements
    HVX_Vector vx2_qf = Q6_Vqf32_vsub_VsfVsf(vx[2], zero); // 32 elements
    HVX_Vector vx3_qf = Q6_Vqf32_vsub_VsfVsf(vx[3], zero); // 32 elements

    // Convert into fp16
    HVX_Vector vx01_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vx1_qf, vx0_qf)));
    HVX_Vector vx23_hf = Q6_Vh_vdeal_Vh(Q6_Vhf_equals_Wqf32(Q6_W_vcombine_VV(vx3_qf, vx2_qf)));

    // Compute max and scale over the whole 128-element block
    HVX_Vector vmax_hf = hvx_vec_reduce_max_f16(hvx_vec_abs_f16(vx01_hf));
    vmax_hf = hvx_vec_reduce_max2_f16(hvx_vec_abs_f16(vx23_hf), vmax_hf); // replicated over all lanes

    // Scale d = max * (1/127); 0x2008 is 1.0/127.0 in fp16
    HVX_Vector vd_qf16 = Q6_Vqf16_vmpy_VhfVhf(vmax_hf, Q6_Vh_vsplat_R(0x2008)); // 1.0 / 127.0
    HVX_Vector vd_hf = Q6_Vhf_equals_Vqf16(vd_qf16);

    *(HVX_UVector *) y_d = vd_hf;

    // Divide input by the scale (multiply by its reciprocal)
    HVX_Vector vd_inv_hf = hvx_vec_inverse_f16(vd_hf);
    vx01_hf = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(vx01_hf, vd_inv_hf));
    vx23_hf = Q6_Vhf_equals_Vqf16(Q6_Vqf16_vmpy_VhfVhf(vx23_hf, vd_inv_hf));

    // Convert to int8 (round + saturate)
    HVX_Vector vx01_i16 = hvx_vec_i16_from_hf_rnd_sat(vx01_hf);
    HVX_Vector vx23_i16 = hvx_vec_i16_from_hf_rnd_sat(vx23_hf);
    HVX_Vector vx_i8 = Q6_Vb_vpack_VhVh_sat(vx23_i16, vx01_i16);

    *(HVX_Vector *) y_q = vx_i8;
}
|
|
2210
|
+
|
|
2211
|
+
// Overrides input x
//
// Quantize one row of k floats into the q8x4x2 row layout: all int8 quants
// first (k bytes), followed by all fp16 group scales (8 per QK_Q8_0x4x2
// block). The per-block quantizer is selected at compile time via
// FP32_QUANTIZE_GROUP_SIZE (32/64/128 -> q8x1/q8x2/q8x4).
// Scales are first written into the (aligned, VTCM) input buffer x, which is
// clobbered, and then copied to their final location in one pass.
static void quantize_row_f32_q8x4x2(float * restrict x, uint8_t * restrict y, uint32_t k) {
    assert(k % 32 == 0);
    const uint32_t qk = QK_Q8_0x4x2;
    const uint32_t nb = (k + qk - 1) / qk;  // number of QK_Q8_0x4x2 blocks

    const uint32_t qrow_size = k; // int8

    const uint32_t dblk_size = 8 * 2;        // 8x __fp16
    const uint32_t qblk_size = QK_Q8_0x4x2;  // int8

    uint8_t * restrict y_q = (y + 0);         // quants first
    uint8_t * restrict y_d = (y + qrow_size); // then scales

    // Temp scales override input since we're working off of the aligned temp buffer in VTCM
    uint8_t * restrict t_d = (uint8_t *) x;

    // Each iteration quantizes one block as two half-blocks of qk/2 floats.
    for (uint32_t i = 0; i < nb; i++) {
#if FP32_QUANTIZE_GROUP_SIZE == 32
        quantize_block_f32_q8x1(x + (i*2 + 0) * qk/2, y_q + (i*2 + 0) * qblk_size/2, t_d + (i*2 + 0) * dblk_size/2);
        quantize_block_f32_q8x1(x + (i*2 + 1) * qk/2, y_q + (i*2 + 1) * qblk_size/2, t_d + (i*2 + 1) * dblk_size/2);
#elif FP32_QUANTIZE_GROUP_SIZE == 64
        quantize_block_f32_q8x2(x + (i*2 + 0) * qk/2, y_q + (i*2 + 0) * qblk_size/2, t_d + (i*2 + 0) * dblk_size/2);
        quantize_block_f32_q8x2(x + (i*2 + 1) * qk/2, y_q + (i*2 + 1) * qblk_size/2, t_d + (i*2 + 1) * dblk_size/2);
#elif FP32_QUANTIZE_GROUP_SIZE == 128
        quantize_block_f32_q8x4(x + (i*2 + 0) * qk/2, y_q + (i*2 + 0) * qblk_size/2, t_d + (i*2 + 0) * dblk_size/2);
        quantize_block_f32_q8x4(x + (i*2 + 1) * qk/2, y_q + (i*2 + 1) * qblk_size/2, t_d + (i*2 + 1) * dblk_size/2);
#else
#error "FP32_QUANTIZE_GROUP_SIZE must be 32, 64, or 128"
#endif
    }

    // now copy the scales into final location
    hvx_copy_f16_ua(y_d, t_d, nb * 8);
}
|
|
2246
|
+
|
|
2247
|
+
// Worker job: dynamically quantize this thread's slice of src1 rows from
// f32 (DDR) into the q8x4x2 layout in the src1 VTCM spad.
// The src0 spad is borrowed as an aligned, zero-padded temp row buffer
// (quantize_row_f32_q8x4x2 clobbers it).
static void quantize_f32_q8x4x2(unsigned int nth, unsigned int ith, void * data) {
    struct htp_matmul_context * mmctx = data;
    struct htp_ops_context * octx = mmctx->octx;

    const struct htp_tensor * src = &octx->src1;
    uint8_t * restrict dst = octx->src1_spad.data;
    struct htp_spad * spad = &octx->src0_spad;  // temp buffer (see note above)
    uint32_t nrows_per_thread = mmctx->src1_nrows_per_thread;

    uint64_t t1 = HAP_perf_get_qtimer_count();

    const uint32_t ne0 = src->ne[0];
    const uint32_t ne1 = src->ne[1];
    const uint32_t ne2 = src->ne[2];
    const uint32_t ne3 = src->ne[3];

    const uint32_t nrows = ne1 * ne2 * ne3; // total n_rows

    const uint32_t ir_first = nrows_per_thread * ith;             // first row
    const uint32_t ir_last = MIN(ir_first + nrows_per_thread, nrows); // last row

    const size_t src_row_size = src->nb[1];
    const size_t dst_row_size = q8x4x2_row_size(ne0);

    uint8_t * restrict src_data = (uint8_t *) src->data + (src_row_size * ir_first);
    uint8_t * restrict dst_data = (uint8_t *) dst + (dst_row_size * ir_first);
    uint8_t * restrict tmp_data = (uint8_t *) spad->data + (spad->size_per_thread * ith);

    // Zero the temp row once so rows shorter than a full block boundary are
    // padded with zeros (only copied-over bytes change between rows).
    const size_t src_row_size_padded = hex_round_up(src_row_size, QK_Q8_0x4x2 * sizeof(float));
    memset(tmp_data, 0, src_row_size_padded); // zero-out temp row data for padding

    for (uint32_t i = ir_first; i < ir_last; ++i) {
        // Prefetch the next rows into L2 while quantizing the current one.
        hex_l2fetch(src_data, src_row_size, src_row_size, 2);
        hvx_copy_f32_aa(tmp_data, src_data, ne0);

        // FARF(HIGH, "quantize-q8x4-row: %u\n", i);
        quantize_row_f32_q8x4x2((float *) tmp_data, dst_data, ne0);
        dst_data += dst_row_size;
        src_data += src_row_size;
    }

    uint64_t t2 = HAP_perf_get_qtimer_count();

    FARF(HIGH, "quantize-f32-q8x4: %u/%u : n-rows %u (%u:%u) row-size %u -> %u usec %u\n", ith, nth, nrows, ir_first,
         ir_last, src_row_size, dst_row_size, (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1));
}
|
|
2293
|
+
|
|
2294
|
+
// Worker job: convert this thread's slice of src1 rows from f32 (DDR) to
// f16, writing them into the src1 VTCM spad at the spad's row stride.
// Used by the optimized f16-f16 matmul path when src1 arrives as f32.
static void quantize_f32_f16(unsigned int nth, unsigned int ith, void * data) {
    struct htp_matmul_context * mmctx = data;
    struct htp_ops_context * octx = mmctx->octx;

    const struct htp_tensor * src = &octx->src1;
    uint8_t * restrict dst = octx->src1_spad.data;
    uint32_t nrows_per_thread = mmctx->src1_nrows_per_thread;
    uint32_t dst_stride = octx->src1_spad.stride;

    uint64_t t1 = HAP_perf_get_qtimer_count();

    const uint32_t ne0 = src->ne[0];
    const uint32_t ne1 = src->ne[1];
    const uint32_t ne2 = src->ne[2];
    const uint32_t ne3 = src->ne[3];

    const uint32_t nrows = ne1 * ne2 * ne3; // total n_rows

    const uint32_t ir_first = nrows_per_thread * ith;             // first row
    const uint32_t ir_last = MIN(ir_first + nrows_per_thread, nrows); // last row

    // Source rows are f32, hence 4 bytes per element; nb[1] may be larger
    // than the packed row size if src1 has padding between rows.
    const size_t src_row_size = ne0 * sizeof(float);
    const size_t src_stride = src->nb[1];

    uint8_t * restrict src_data = (uint8_t *) src->data + (src_stride * ir_first);
    uint8_t * restrict dst_data = (uint8_t *) dst + (dst_stride * ir_first);

    for (uint32_t i = ir_first; i < ir_last; ++i) {
        // Prefetch upcoming rows into L2 while converting the current one.
        hex_l2fetch(src_data, src_row_size, src_stride, 2);
        hvx_copy_f16_f32_au(dst_data, src_data, ne0);

        dst_data += dst_stride;
        src_data += src_stride;
    }

    uint64_t t2 = HAP_perf_get_qtimer_count();

    FARF(HIGH, "quantize-f32-f16: %u/%u : n-rows %u (%u:%u) row-size %u (%u) -> %u usec %u\n", ith, nth, nrows, ir_first,
         ir_last, src_row_size, src_stride, dst_stride, (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1));
}
|
|
2334
|
+
|
|
2335
|
+
// TODO just a plain copy that should be done via the DMA during the Op setup
|
|
2336
|
+
static void quantize_f16_f16(unsigned int nth, unsigned int ith, void * data) {
|
|
2337
|
+
struct htp_matmul_context * mmctx = data;
|
|
2338
|
+
struct htp_ops_context * octx = mmctx->octx;
|
|
2339
|
+
|
|
2340
|
+
const struct htp_tensor * src = &octx->src1;
|
|
2341
|
+
uint8_t * restrict dst = octx->src1_spad.data;
|
|
2342
|
+
uint32_t nrows_per_thread = mmctx->src1_nrows_per_thread;
|
|
2343
|
+
uint32_t dst_stride = octx->src1_spad.stride;
|
|
2344
|
+
|
|
2345
|
+
uint64_t t1 = HAP_perf_get_qtimer_count();
|
|
2346
|
+
|
|
2347
|
+
const uint32_t ne0 = src->ne[0];
|
|
2348
|
+
const uint32_t ne1 = src->ne[1];
|
|
2349
|
+
const uint32_t ne2 = src->ne[2];
|
|
2350
|
+
const uint32_t ne3 = src->ne[3];
|
|
2351
|
+
|
|
2352
|
+
const uint32_t nrows = ne1 * ne2 * ne3; // total n_rows
|
|
2353
|
+
|
|
2354
|
+
const uint32_t ir_first = nrows_per_thread * ith; // first row
|
|
2355
|
+
const uint32_t ir_last = MIN(ir_first + nrows_per_thread, nrows); // last row
|
|
2356
|
+
|
|
2357
|
+
const size_t src_row_size = ne0 * sizeof(float);
|
|
2358
|
+
const size_t src_stride = src->nb[1];
|
|
2359
|
+
|
|
2360
|
+
uint8_t * restrict src_data = (uint8_t *) src->data + (src_stride * ir_first);
|
|
2361
|
+
uint8_t * restrict dst_data = (uint8_t *) dst + (dst_stride * ir_first);
|
|
2362
|
+
|
|
2363
|
+
for (uint32_t i = ir_first; i < ir_last; ++i) {
|
|
2364
|
+
hex_l2fetch(src_data, src_row_size, src_stride, 2);
|
|
2365
|
+
hvx_copy_f16_au(dst_data, src_data, ne0);
|
|
2366
|
+
|
|
2367
|
+
dst_data += dst_stride;
|
|
2368
|
+
src_data += src_stride;
|
|
2369
|
+
}
|
|
2370
|
+
|
|
2371
|
+
uint64_t t2 = HAP_perf_get_qtimer_count();
|
|
2372
|
+
|
|
2373
|
+
FARF(HIGH, "quantize-f16-f16: %u/%u : n-rows %u (%u:%u) row-size %u (%u) -> %u usec %u\n", ith, nth, nrows, ir_first,
|
|
2374
|
+
ir_last, src_row_size, src_stride, dst_stride, (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1));
|
|
2375
|
+
}
|
|
2376
|
+
|
|
2377
|
+
|
|
2378
|
+
static inline bool htp_is_permuted(const struct htp_tensor * t) {
|
|
2379
|
+
return t->nb[0] > t->nb[1] || t->nb[1] > t->nb[2] || t->nb[2] > t->nb[3];
|
|
2380
|
+
}
|
|
2381
|
+
|
|
2382
|
+
static int htp_mminit_vec_dot(struct htp_matmul_context * mmctx, enum htp_data_type type) {
|
|
2383
|
+
switch (type) {
|
|
2384
|
+
case HTP_TYPE_Q4_0:
|
|
2385
|
+
mmctx->type = "q4x4x2-f32";
|
|
2386
|
+
mmctx->vec_dot_1x1 = vec_dot_q4x4x2_q8x4x2_1x1;
|
|
2387
|
+
mmctx->vec_dot_2x1 = vec_dot_q4x4x2_q8x4x2_2x1;
|
|
2388
|
+
mmctx->vec_dot_2x2 = vec_dot_q4x4x2_q8x4x2_2x2;
|
|
2389
|
+
return 0;
|
|
2390
|
+
case HTP_TYPE_Q8_0:
|
|
2391
|
+
mmctx->type = "q8x4x2-f32";
|
|
2392
|
+
mmctx->vec_dot_1x1 = vec_dot_q8x4x2_q8x4x2_1x1;
|
|
2393
|
+
mmctx->vec_dot_2x1 = vec_dot_q8x4x2_q8x4x2_2x1;
|
|
2394
|
+
mmctx->vec_dot_2x2 = vec_dot_q8x4x2_q8x4x2_2x2;
|
|
2395
|
+
return 0;
|
|
2396
|
+
case HTP_TYPE_MXFP4:
|
|
2397
|
+
mmctx->type = "mxfp4x4x2-f32";
|
|
2398
|
+
mmctx->vec_dot_1x1 = vec_dot_mxfp4x4x2_q8x4x2_1x1;
|
|
2399
|
+
mmctx->vec_dot_2x1 = vec_dot_mxfp4x4x2_q8x4x2_2x1;
|
|
2400
|
+
mmctx->vec_dot_2x2 = vec_dot_mxfp4x4x2_q8x4x2_2x2;
|
|
2401
|
+
return 0;
|
|
2402
|
+
default:
|
|
2403
|
+
return -1;
|
|
2404
|
+
}
|
|
2405
|
+
}
|
|
2406
|
+
|
|
2407
|
+
static void htp_mminit_spad(struct htp_ops_context * octx,
|
|
2408
|
+
size_t dst_row_size,
|
|
2409
|
+
size_t src0_row_size_padded,
|
|
2410
|
+
size_t src1_row_size,
|
|
2411
|
+
uint32_t src1_nrows,
|
|
2412
|
+
size_t src2_spad_size_per_thread) {
|
|
2413
|
+
octx->dst_spad.size_per_thread = hex_round_up(MM_SPAD_DST_NROWS * dst_row_size, 256);
|
|
2414
|
+
octx->src0_spad.size_per_thread = hex_round_up(MM_SPAD_SRC0_NROWS * src0_row_size_padded, 256);
|
|
2415
|
+
octx->src1_spad.size_per_thread = hex_round_up(src1_row_size * src1_nrows, 256);
|
|
2416
|
+
|
|
2417
|
+
if (src2_spad_size_per_thread > 0) {
|
|
2418
|
+
octx->src2_spad.size_per_thread = src2_spad_size_per_thread;
|
|
2419
|
+
octx->src2_spad.size = octx->src2_spad.size_per_thread;
|
|
2420
|
+
}
|
|
2421
|
+
|
|
2422
|
+
// src0 spad is also used in dynamic quantizer to store padded src1 rows
|
|
2423
|
+
size_t src1_row_size_padded = hex_round_up(src1_row_size, QK_Q8_0x4x2 * sizeof(float));
|
|
2424
|
+
if (octx->src0_spad.size_per_thread < src1_row_size_padded) {
|
|
2425
|
+
octx->src0_spad.size_per_thread = src1_row_size_padded;
|
|
2426
|
+
}
|
|
2427
|
+
|
|
2428
|
+
octx->src1_spad.size = octx->src1_spad.size_per_thread;
|
|
2429
|
+
octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads;
|
|
2430
|
+
octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads;
|
|
2431
|
+
}
|
|
2432
|
+
|
|
2433
|
+
int op_matmul(struct htp_ops_context * octx) {
|
|
2434
|
+
htp_matmul_tensors_preamble;
|
|
2435
|
+
|
|
2436
|
+
struct htp_matmul_context mmctx_struct = {0};
|
|
2437
|
+
struct htp_matmul_context * mmctx = &mmctx_struct;
|
|
2438
|
+
mmctx->octx = octx;
|
|
2439
|
+
|
|
2440
|
+
const uint32_t src0_nrows = ne01 * ne02 * ne03;
|
|
2441
|
+
const uint32_t src1_nrows = ne11 * ne12 * ne13;
|
|
2442
|
+
|
|
2443
|
+
// Compute src0_nrows_per_thread
|
|
2444
|
+
mmctx->src0_nrows_per_thread = (src0_nrows + octx->n_threads - 1) / octx->n_threads;
|
|
2445
|
+
mmctx->src0_nrows_per_thread += (mmctx->src0_nrows_per_thread & 1); // round up to even
|
|
2446
|
+
|
|
2447
|
+
const size_t src0_row_size = nb01;
|
|
2448
|
+
const size_t dst_row_size = nb1;
|
|
2449
|
+
size_t src1_row_size = nb11;
|
|
2450
|
+
|
|
2451
|
+
const size_t src0_row_size_padded = hex_round_up(src0_row_size, 128);
|
|
2452
|
+
size_t src1_row_size_padded;
|
|
2453
|
+
|
|
2454
|
+
worker_callback_t quant_job_func;
|
|
2455
|
+
worker_callback_t matmul_job_func = src1_nrows > 1 ? matmul_2d : matvec_2d;
|
|
2456
|
+
|
|
2457
|
+
bool need_quant = !(octx->flags & HTP_OPFLAGS_SKIP_QUANTIZE);
|
|
2458
|
+
|
|
2459
|
+
if (src0->type == HTP_TYPE_F16) {
|
|
2460
|
+
// Try optimized f16-f16 path first (src1 in VTCM)
|
|
2461
|
+
const size_t f16_src1_row_size = hex_round_up(ne10 * 2, 128);
|
|
2462
|
+
const size_t f16_src1_spad_size = hex_round_up(f16_src1_row_size * src1_nrows, 256);
|
|
2463
|
+
const size_t f16_src0_spad_size = hex_round_up(MM_SPAD_SRC0_NROWS * src0_row_size_padded, 256) * octx->n_threads;
|
|
2464
|
+
const size_t f16_dst_spad_size = hex_round_up(MM_SPAD_DST_NROWS * dst_row_size, 256) * octx->n_threads;
|
|
2465
|
+
|
|
2466
|
+
const size_t f16_total_size = f16_src1_spad_size + f16_src0_spad_size + f16_dst_spad_size;
|
|
2467
|
+
|
|
2468
|
+
// Default matmul implementation does not support multi-batch src0 (N-vs-N broadcasting).
|
|
2469
|
+
// It only supports 1-vs-N broadcasting (src0 is 2D) or standard 2D matmul.
|
|
2470
|
+
const bool is_batched = (ne02 > 1) || (ne03 > 1);
|
|
2471
|
+
const bool is_permuted = htp_is_permuted(&octx->src0) || htp_is_permuted(&octx->src1);
|
|
2472
|
+
|
|
2473
|
+
if (!is_batched && !is_permuted && f16_total_size <= octx->ctx->vtcm_size) {
|
|
2474
|
+
// Optimized path
|
|
2475
|
+
quant_job_func = (src1->type == HTP_TYPE_F32) ? quantize_f32_f16 : quantize_f16_f16;
|
|
2476
|
+
mmctx->type = "f16-f16";
|
|
2477
|
+
mmctx->vec_dot_1x1 = vec_dot_f16_f16_aa_1x1;
|
|
2478
|
+
mmctx->vec_dot_2x1 = vec_dot_f16_f16_aa_2x1;
|
|
2479
|
+
mmctx->vec_dot_2x2 = vec_dot_f16_f16_aa_2x2;
|
|
2480
|
+
|
|
2481
|
+
src1_row_size = f16_src1_row_size; // row size post quantization
|
|
2482
|
+
|
|
2483
|
+
octx->dst_spad.size_per_thread = hex_round_up(MM_SPAD_DST_NROWS * dst_row_size, 256);
|
|
2484
|
+
octx->src0_spad.size_per_thread = hex_round_up(MM_SPAD_SRC0_NROWS * src0_row_size_padded, 256);
|
|
2485
|
+
octx->src1_spad.size_per_thread = hex_round_up(src1_row_size * src1_nrows, 256);
|
|
2486
|
+
|
|
2487
|
+
octx->src1_spad.size = octx->src1_spad.size_per_thread;
|
|
2488
|
+
octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads;
|
|
2489
|
+
octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads;
|
|
2490
|
+
} else {
|
|
2491
|
+
// Fallback to f16/f32 (DDR) if src1 doesn't fit in VTCM or broadcasting is required
|
|
2492
|
+
quant_job_func = NULL;
|
|
2493
|
+
if (src1->type == HTP_TYPE_F32) {
|
|
2494
|
+
mmctx->type = "f16-f32";
|
|
2495
|
+
mmctx->vec_dot_1x1 = vec_dot_f16_f32_uu_1x1;
|
|
2496
|
+
matmul_job_func = matmul_4d;
|
|
2497
|
+
} else {
|
|
2498
|
+
mmctx->type = "f16-f16";
|
|
2499
|
+
mmctx->vec_dot_1x1 = vec_dot_f16_f16_uu_1x1;
|
|
2500
|
+
matmul_job_func = matmul_4d;
|
|
2501
|
+
}
|
|
2502
|
+
|
|
2503
|
+
src1_row_size = nb11; // original row size in DDR
|
|
2504
|
+
|
|
2505
|
+
octx->dst_spad.size_per_thread = hex_round_up(MM_SPAD_DST_NROWS * dst_row_size, 256);
|
|
2506
|
+
octx->src0_spad.size_per_thread = hex_round_up(MM_SPAD_SRC0_NROWS * src0_row_size, 256);
|
|
2507
|
+
octx->src1_spad.size_per_thread = hex_round_up(MM_SPAD_SRC1_NROWS * src1_row_size, 256);
|
|
2508
|
+
|
|
2509
|
+
octx->src0_spad.size = octx->src0_spad.size_per_thread * octx->n_threads;
|
|
2510
|
+
octx->src1_spad.size = octx->src1_spad.size_per_thread * octx->n_threads;
|
|
2511
|
+
octx->dst_spad.size = octx->dst_spad.size_per_thread * octx->n_threads;
|
|
2512
|
+
|
|
2513
|
+
// Init fastdiv for matmul_4d (supports broadcasting)
|
|
2514
|
+
mmctx->mm_div_ne12_ne1 = init_fastdiv_values(src1->ne[2] * dst->ne[1]);
|
|
2515
|
+
mmctx->mm_div_ne1 = init_fastdiv_values(dst->ne[1]);
|
|
2516
|
+
mmctx->mm_div_r2 = init_fastdiv_values(src1->ne[2] / src0->ne[2]);
|
|
2517
|
+
mmctx->mm_div_r3 = init_fastdiv_values(src1->ne[3] / src0->ne[3]);
|
|
2518
|
+
|
|
2519
|
+
need_quant = false;
|
|
2520
|
+
}
|
|
2521
|
+
} else {
|
|
2522
|
+
if (htp_mminit_vec_dot(mmctx, src0->type) != 0) {
|
|
2523
|
+
return HTP_STATUS_NO_SUPPORT;
|
|
2524
|
+
}
|
|
2525
|
+
|
|
2526
|
+
quant_job_func = quantize_f32_q8x4x2;
|
|
2527
|
+
src1_row_size = q8x4x2_row_size(ne10);
|
|
2528
|
+
htp_mminit_spad(octx, dst_row_size, src0_row_size_padded, src1_row_size, src1_nrows, 0);
|
|
2529
|
+
}
|
|
2530
|
+
|
|
2531
|
+
// VTCM scratchpads for all tensors
|
|
2532
|
+
size_t spad_size = octx->src1_spad.size + octx->src0_spad.size + octx->dst_spad.size;
|
|
2533
|
+
|
|
2534
|
+
FARF(HIGH, "matmul-%s : src0-spad-size %u src1-spad-size %u dst-spad-size %u (%zu)\n", mmctx->type,
|
|
2535
|
+
octx->src0_spad.size, octx->src1_spad.size, octx->dst_spad.size, spad_size);
|
|
2536
|
+
|
|
2537
|
+
FARF(HIGH, "matmul-%s : %ux%ux%ux%u * %ux%ux%ux%u-> %ux%ux%ux%u (0x%p, 0x%p, 0x%p)\n", mmctx->type, src0->ne[0],
|
|
2538
|
+
src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], dst->ne[0],
|
|
2539
|
+
dst->ne[1], dst->ne[2], dst->ne[3], src0->data, src1->data, dst->data);
|
|
2540
|
+
|
|
2541
|
+
// Make sure the reserved vtcm size is sufficient
|
|
2542
|
+
if (octx->ctx->vtcm_size < spad_size) {
|
|
2543
|
+
FARF(ERROR, "matmul-%s : current VTCM reservation %zu is too small, needed %zu\n", mmctx->type,
|
|
2544
|
+
octx->ctx->vtcm_size, spad_size);
|
|
2545
|
+
return HTP_STATUS_VTCM_TOO_SMALL;
|
|
2546
|
+
}
|
|
2547
|
+
|
|
2548
|
+
octx->src0_spad.data = octx->ctx->vtcm_base;
|
|
2549
|
+
octx->src1_spad.data = octx->src0_spad.data + octx->src0_spad.size;
|
|
2550
|
+
octx->dst_spad.data = octx->src1_spad.data + octx->src1_spad.size;
|
|
2551
|
+
|
|
2552
|
+
octx->src0_spad.stride = src0_row_size_padded;
|
|
2553
|
+
octx->src1_spad.stride = src1_row_size;
|
|
2554
|
+
|
|
2555
|
+
if (need_quant) {
|
|
2556
|
+
const uint32_t n_quant_jobs = MIN(src1_nrows, octx->n_threads);
|
|
2557
|
+
mmctx->src1_nrows_per_thread = (src1_nrows + n_quant_jobs - 1) / n_quant_jobs;
|
|
2558
|
+
worker_pool_run_func(octx->ctx->worker_pool, quant_job_func, mmctx, n_quant_jobs);
|
|
2559
|
+
}
|
|
2560
|
+
|
|
2561
|
+
if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) {
|
|
2562
|
+
const uint32_t n_matmul_jobs = octx->n_threads;
|
|
2563
|
+
worker_pool_run_func(octx->ctx->worker_pool, matmul_job_func, mmctx, n_matmul_jobs);
|
|
2564
|
+
}
|
|
2565
|
+
|
|
2566
|
+
return HTP_STATUS_OK;
|
|
2567
|
+
}
|
|
2568
|
+
|
|
2569
|
+
int op_matmul_id(struct htp_ops_context * octx) {
|
|
2570
|
+
htp_matmul_tensors_preamble;
|
|
2571
|
+
|
|
2572
|
+
struct htp_matmul_context mmctx_struct = {0};
|
|
2573
|
+
struct htp_matmul_context * mmctx = &mmctx_struct;
|
|
2574
|
+
mmctx->octx = octx;
|
|
2575
|
+
|
|
2576
|
+
struct htp_tensor * restrict ids = &octx->src2;
|
|
2577
|
+
|
|
2578
|
+
const size_t src0_row_size = nb01;
|
|
2579
|
+
const size_t dst_row_size = nb1;
|
|
2580
|
+
|
|
2581
|
+
const size_t src0_row_size_padded = hex_round_up(src0_row_size, 128);
|
|
2582
|
+
|
|
2583
|
+
const uint32_t src0_nrows = ne01; // per expert
|
|
2584
|
+
const uint32_t src1_nrows = ne11 * ne12 * ne13;
|
|
2585
|
+
|
|
2586
|
+
worker_callback_t quant_job_func;
|
|
2587
|
+
worker_callback_t matmul_id_job_func = src1_nrows > 1 ? matmul_id : matvec_id;
|
|
2588
|
+
|
|
2589
|
+
// Compute src0_nrows_per_thread
|
|
2590
|
+
mmctx->src0_nrows_per_thread = (src0_nrows + octx->n_threads - 1) / octx->n_threads;
|
|
2591
|
+
mmctx->src0_nrows_per_thread += (mmctx->src0_nrows_per_thread & 1); // round up to even
|
|
2592
|
+
|
|
2593
|
+
size_t src1_row_size;
|
|
2594
|
+
size_t src1_row_size_padded;
|
|
2595
|
+
|
|
2596
|
+
// row groups
|
|
2597
|
+
const int n_ids = ids->ne[0]; // n_expert_used
|
|
2598
|
+
const int n_as = ne02; // n_expert
|
|
2599
|
+
|
|
2600
|
+
size_t matrix_row_counts_size = n_as * sizeof(uint32_t);
|
|
2601
|
+
size_t matrix_row_map_size = n_as * ids->ne[0] * ids->ne[1] * sizeof(struct mmid_row_mapping);
|
|
2602
|
+
|
|
2603
|
+
if (htp_mminit_vec_dot(mmctx, src0->type) != 0) {
|
|
2604
|
+
return HTP_STATUS_NO_SUPPORT;
|
|
2605
|
+
}
|
|
2606
|
+
|
|
2607
|
+
quant_job_func = quantize_f32_q8x4x2;
|
|
2608
|
+
src1_row_size = q8x4x2_row_size(ne10);
|
|
2609
|
+
|
|
2610
|
+
const size_t src2_spad_size_per_thread = hex_round_up(matrix_row_counts_size + matrix_row_map_size, 256);
|
|
2611
|
+
htp_mminit_spad(octx, dst_row_size, src0_row_size_padded, src1_row_size, src1_nrows, src2_spad_size_per_thread);
|
|
2612
|
+
|
|
2613
|
+
size_t spad_size = octx->src2_spad.size + octx->src1_spad.size + octx->src0_spad.size + octx->dst_spad.size;
|
|
2614
|
+
|
|
2615
|
+
FARF(HIGH, "matmul-id-%s : src0-spad-size %u src1-spad-size %u src2-spad-size %u dst-spad-size %u (%zu)\n", mmctx->type,
|
|
2616
|
+
octx->src0_spad.size, octx->src1_spad.size, octx->src2_spad.size, octx->dst_spad.size, spad_size);
|
|
2617
|
+
|
|
2618
|
+
FARF(HIGH, "matmul-id-%s : %ux%ux%ux%u * %ux%ux%ux%u (%ux%ux%ux%u) -> %ux%ux%ux%u (0x%p, 0x%p, 0x%p)\n", mmctx->type,
|
|
2619
|
+
src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3],
|
|
2620
|
+
ids->ne[0], ids->ne[1], ids->ne[2], ids->ne[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], src0->data,
|
|
2621
|
+
src1->data, dst->data);
|
|
2622
|
+
|
|
2623
|
+
// Make sure the reserved vtcm size is sufficient
|
|
2624
|
+
if (octx->ctx->vtcm_size < spad_size) {
|
|
2625
|
+
FARF(ERROR, "matmul-id-%s : current VTCM reservation %zu is too small, needed %zu\n", mmctx->type, octx->ctx->vtcm_size, spad_size);
|
|
2626
|
+
return HTP_STATUS_VTCM_TOO_SMALL;
|
|
2627
|
+
}
|
|
2628
|
+
|
|
2629
|
+
octx->src0_spad.data = octx->ctx->vtcm_base;
|
|
2630
|
+
octx->src1_spad.data = octx->src0_spad.data + octx->src0_spad.size;
|
|
2631
|
+
octx->src2_spad.data = octx->src1_spad.data + octx->src1_spad.size;
|
|
2632
|
+
octx->dst_spad.data = octx->src2_spad.data + octx->src2_spad.size;
|
|
2633
|
+
|
|
2634
|
+
octx->src0_spad.stride = src0_row_size_padded;
|
|
2635
|
+
octx->src1_spad.stride = src1_row_size;
|
|
2636
|
+
|
|
2637
|
+
if (src1_nrows > 1) {
|
|
2638
|
+
// initialize matrix_row_counts and map
|
|
2639
|
+
uint32_t * matrix_row_counts = (uint32_t *) octx->src2_spad.data + 0;
|
|
2640
|
+
struct mmid_row_mapping * matrix_rows = (void *) octx->src2_spad.data + matrix_row_counts_size;
|
|
2641
|
+
|
|
2642
|
+
memset(matrix_row_counts, 0, n_as * sizeof(uint32_t));
|
|
2643
|
+
|
|
2644
|
+
// group rows by src0 matrix
|
|
2645
|
+
for (uint32_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) { // token idx
|
|
2646
|
+
for (uint32_t id = 0; id < n_ids; ++id) { // expert idx
|
|
2647
|
+
const uint32_t i02 = *(const uint32_t *) ((const uint8_t *) ids->data + iid1 * ids->nb[1] + id * ids->nb[0]);
|
|
2648
|
+
|
|
2649
|
+
assert(i02 >= 0 && i02 < n_as);
|
|
2650
|
+
|
|
2651
|
+
MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = (struct mmid_row_mapping) { id, iid1 };
|
|
2652
|
+
matrix_row_counts[i02] += 1;
|
|
2653
|
+
}
|
|
2654
|
+
}
|
|
2655
|
+
}
|
|
2656
|
+
|
|
2657
|
+
// Setup worker pool callbacks
|
|
2658
|
+
if (!(octx->flags & HTP_OPFLAGS_SKIP_QUANTIZE)) {
|
|
2659
|
+
const uint32_t n_quant_jobs = MIN(src1_nrows, octx->n_threads);
|
|
2660
|
+
mmctx->src1_nrows_per_thread = (src1_nrows + n_quant_jobs - 1) / n_quant_jobs;
|
|
2661
|
+
worker_pool_run_func(octx->ctx->worker_pool, quant_job_func, mmctx, n_quant_jobs);
|
|
2662
|
+
}
|
|
2663
|
+
|
|
2664
|
+
if (!(octx->flags & HTP_OPFLAGS_SKIP_COMPUTE)) {
|
|
2665
|
+
const uint32_t n_matmul_jobs = octx->n_threads;
|
|
2666
|
+
worker_pool_run_func(octx->ctx->worker_pool, matmul_id_job_func, mmctx, n_matmul_jobs);
|
|
2667
|
+
}
|
|
2668
|
+
|
|
2669
|
+
return HTP_STATUS_OK;
|
|
2670
|
+
}
|