whispercpp 1.3.5 → 1.3.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/LICENSE +1 -1
- data/README.md +99 -2
- data/ext/extconf.rb +1 -0
- data/ext/ruby_whisper.c +20 -4
- data/ext/ruby_whisper.h +30 -2
- data/ext/ruby_whisper_context.c +216 -124
- data/ext/ruby_whisper_context_params.c +163 -0
- data/ext/ruby_whisper_model.c +0 -1
- data/ext/ruby_whisper_params.c +0 -1
- data/ext/ruby_whisper_segment.c +0 -1
- data/ext/ruby_whisper_token.c +29 -9
- data/ext/ruby_whisper_transcribe.cpp +4 -1
- data/ext/ruby_whisper_vad_context.c +48 -1
- data/ext/ruby_whisper_vad_context_detect.cpp +6 -5
- data/ext/ruby_whisper_vad_params.c +0 -1
- data/ext/ruby_whisper_vad_segment.c +0 -1
- data/ext/ruby_whisper_vad_segments.c +0 -1
- data/ext/sources/CMakeLists.txt +1 -1
- data/ext/sources/bindings/javascript/package.json +1 -1
- data/ext/sources/cmake/whisper-config.cmake.in +5 -40
- data/ext/sources/examples/bench/bench.cpp +23 -18
- data/ext/sources/examples/cli/cli.cpp +8 -0
- data/ext/sources/examples/common-ggml.cpp +2 -0
- data/ext/sources/examples/miniaudio.h +4507 -2131
- data/ext/sources/examples/server/server.cpp +18 -4
- data/ext/sources/examples/talk-llama/CMakeLists.txt +3 -2
- data/ext/sources/examples/talk-llama/llama-adapter.cpp +7 -13
- data/ext/sources/examples/talk-llama/llama-adapter.h +4 -3
- data/ext/sources/examples/talk-llama/llama-arch.cpp +335 -17
- data/ext/sources/examples/talk-llama/llama-arch.h +42 -0
- data/ext/sources/examples/talk-llama/llama-batch.cpp +3 -1
- data/ext/sources/examples/talk-llama/llama-chat.cpp +21 -1
- data/ext/sources/examples/talk-llama/llama-chat.h +1 -0
- data/ext/sources/examples/talk-llama/llama-context.cpp +508 -520
- data/ext/sources/examples/talk-llama/llama-context.h +27 -28
- data/ext/sources/examples/talk-llama/llama-cparams.h +5 -0
- data/ext/sources/examples/talk-llama/llama-ext.h +12 -0
- data/ext/sources/examples/talk-llama/llama-grammar.cpp +8 -8
- data/ext/sources/examples/talk-llama/llama-graph.cpp +583 -130
- data/ext/sources/examples/talk-llama/llama-graph.h +131 -10
- data/ext/sources/examples/talk-llama/llama-hparams.cpp +57 -40
- data/ext/sources/examples/talk-llama/llama-hparams.h +79 -10
- data/ext/sources/examples/talk-llama/llama-impl.cpp +4 -4
- data/ext/sources/examples/talk-llama/llama-impl.h +13 -1
- data/ext/sources/examples/talk-llama/llama-kv-cache-iswa.cpp +3 -1
- data/ext/sources/examples/talk-llama/llama-kv-cache.cpp +274 -89
- data/ext/sources/examples/talk-llama/llama-kv-cache.h +2 -3
- data/ext/sources/examples/talk-llama/llama-memory-hybrid-iswa.cpp +275 -0
- data/ext/sources/examples/talk-llama/llama-memory-hybrid-iswa.h +140 -0
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.cpp +11 -13
- data/ext/sources/examples/talk-llama/llama-mmap.cpp +28 -11
- data/ext/sources/examples/talk-llama/llama-model-loader.cpp +527 -119
- data/ext/sources/examples/talk-llama/llama-model-loader.h +35 -5
- data/ext/sources/examples/talk-llama/llama-model-saver.cpp +60 -46
- data/ext/sources/examples/talk-llama/llama-model-saver.h +5 -2
- data/ext/sources/examples/talk-llama/llama-model.cpp +1365 -647
- data/ext/sources/examples/talk-llama/llama-model.h +72 -19
- data/ext/sources/examples/talk-llama/llama-quant.cpp +578 -346
- data/ext/sources/examples/talk-llama/{llama-sampling.cpp → llama-sampler.cpp} +190 -76
- data/ext/sources/examples/talk-llama/{llama-sampling.h → llama-sampler.h} +0 -2
- data/ext/sources/examples/talk-llama/llama-vocab.cpp +118 -48
- data/ext/sources/examples/talk-llama/llama-vocab.h +5 -0
- data/ext/sources/examples/talk-llama/llama.cpp +76 -22
- data/ext/sources/examples/talk-llama/llama.h +63 -30
- data/ext/sources/examples/talk-llama/models/afmoe.cpp +2 -3
- data/ext/sources/examples/talk-llama/models/apertus.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/arcee.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/arctic.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/baichuan.cpp +4 -3
- data/ext/sources/examples/talk-llama/models/bailingmoe.cpp +1 -2
- data/ext/sources/examples/talk-llama/models/bailingmoe2.cpp +3 -5
- data/ext/sources/examples/talk-llama/models/bert.cpp +13 -7
- data/ext/sources/examples/talk-llama/models/bitnet.cpp +9 -24
- data/ext/sources/examples/talk-llama/models/bloom.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/chameleon.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/chatglm.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/codeshell.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/cogvlm.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/cohere2-iswa.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/command-r.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/dbrx.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/deci.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/deepseek.cpp +4 -6
- data/ext/sources/examples/talk-llama/models/deepseek2.cpp +24 -21
- data/ext/sources/examples/talk-llama/models/delta-net-base.cpp +445 -0
- data/ext/sources/examples/talk-llama/models/dots1.cpp +4 -6
- data/ext/sources/examples/talk-llama/models/dream.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/ernie4-5-moe.cpp +4 -6
- data/ext/sources/examples/talk-llama/models/ernie4-5.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/eurobert.cpp +97 -0
- data/ext/sources/examples/talk-llama/models/exaone-moe.cpp +145 -0
- data/ext/sources/examples/talk-llama/models/exaone.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/exaone4.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/falcon-h1.cpp +2 -4
- data/ext/sources/examples/talk-llama/models/falcon.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/gemma-embedding.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma2-iswa.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma3.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma3n-iswa.cpp +7 -7
- data/ext/sources/examples/talk-llama/models/glm4-moe.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/glm4.cpp +14 -7
- data/ext/sources/examples/talk-llama/models/gpt2.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/gptneox.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/granite-hybrid.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/granite.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/grok.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/grovemoe.cpp +5 -7
- data/ext/sources/examples/talk-llama/models/hunyuan-dense.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/hunyuan-moe.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/internlm2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/jais.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/jais2.cpp +123 -0
- data/ext/sources/examples/talk-llama/models/jamba.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/kimi-linear.cpp +381 -0
- data/ext/sources/examples/talk-llama/models/lfm2.cpp +145 -124
- data/ext/sources/examples/talk-llama/models/llada-moe.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/llada.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/llama-iswa.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/llama.cpp +18 -11
- data/ext/sources/examples/talk-llama/models/maincoder.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/{graph-context-mamba.cpp → mamba-base.cpp} +9 -3
- data/ext/sources/examples/talk-llama/models/mamba.cpp +1 -2
- data/ext/sources/examples/talk-llama/models/mimo2-iswa.cpp +11 -5
- data/ext/sources/examples/talk-llama/models/minicpm3.cpp +14 -13
- data/ext/sources/examples/talk-llama/models/minimax-m2.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/mistral3.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/models.h +181 -46
- data/ext/sources/examples/talk-llama/models/modern-bert.cpp +2 -9
- data/ext/sources/examples/talk-llama/models/mpt.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/nemotron-h.cpp +26 -14
- data/ext/sources/examples/talk-llama/models/nemotron.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/neo-bert.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/olmo.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/olmo2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/olmoe.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/openai-moe-iswa.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/openelm.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/orion.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/paddleocr.cpp +122 -0
- data/ext/sources/examples/talk-llama/models/pangu-embedded.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/phi2.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/phi3.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/plamo.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/plamo2.cpp +9 -5
- data/ext/sources/examples/talk-llama/models/plamo3.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/plm.cpp +15 -14
- data/ext/sources/examples/talk-llama/models/qwen.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/qwen2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/qwen2moe.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/qwen2vl.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/qwen3.cpp +12 -9
- data/ext/sources/examples/talk-llama/models/qwen35.cpp +381 -0
- data/ext/sources/examples/talk-llama/models/qwen35moe.cpp +422 -0
- data/ext/sources/examples/talk-llama/models/qwen3moe.cpp +15 -8
- data/ext/sources/examples/talk-llama/models/qwen3next.cpp +84 -432
- data/ext/sources/examples/talk-llama/models/qwen3vl-moe.cpp +9 -18
- data/ext/sources/examples/talk-llama/models/qwen3vl.cpp +8 -17
- data/ext/sources/examples/talk-llama/models/refact.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/rnd1.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/rwkv6-base.cpp +2 -0
- data/ext/sources/examples/talk-llama/models/rwkv7-base.cpp +2 -0
- data/ext/sources/examples/talk-llama/models/seed-oss.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/smallthinker.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/smollm3.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/stablelm.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/starcoder.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/starcoder2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/step35-iswa.cpp +165 -0
- data/ext/sources/examples/talk-llama/models/t5-dec.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/t5-enc.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/xverse.cpp +3 -3
- data/ext/sources/examples/talk-llama/unicode.cpp +21 -65
- data/ext/sources/ggml/CMakeLists.txt +9 -3
- data/ext/sources/ggml/include/ggml-backend.h +1 -1
- data/ext/sources/ggml/include/ggml-cann.h +1 -1
- data/ext/sources/ggml/include/ggml-cpu.h +5 -0
- data/ext/sources/ggml/include/ggml-openvino.h +37 -0
- data/ext/sources/ggml/include/ggml-opt.h +1 -1
- data/ext/sources/ggml/include/ggml-rpc.h +6 -1
- data/ext/sources/ggml/include/ggml-virtgpu.h +14 -0
- data/ext/sources/ggml/include/ggml.h +56 -9
- data/ext/sources/ggml/src/CMakeLists.txt +3 -0
- data/ext/sources/ggml/src/ggml-alloc.c +4 -9
- data/ext/sources/ggml/src/ggml-backend-dl.cpp +48 -0
- data/ext/sources/ggml/src/ggml-backend-dl.h +45 -0
- data/ext/sources/ggml/src/ggml-backend-reg.cpp +28 -86
- data/ext/sources/ggml/src/ggml-backend.cpp +5 -2
- data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +1 -1
- data/ext/sources/ggml/src/ggml-blas/ggml-blas.cpp +6 -2
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.cpp +1 -1
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.h +1 -1
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.cpp +348 -189
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +40 -85
- data/ext/sources/ggml/src/ggml-cann/common.h +3 -4
- data/ext/sources/ggml/src/ggml-cann/ggml-cann.cpp +44 -62
- data/ext/sources/ggml/src/ggml-common.h +11 -0
- data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +16 -11
- data/ext/sources/ggml/src/ggml-cpu/amx/amx.cpp +42 -19
- data/ext/sources/ggml/src/ggml-cpu/amx/common.h +34 -10
- data/ext/sources/ggml/src/ggml-cpu/amx/mmq.cpp +85 -85
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/quants.c +85 -1
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/repack.cpp +2744 -548
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/quants.c +1653 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/repack.cpp +1391 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/s390/quants.c +8 -10
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/quants.c +9 -9
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/repack.cpp +118 -18
- data/ext/sources/ggml/src/ggml-cpu/arch-fallback.h +107 -26
- data/ext/sources/ggml/src/ggml-cpu/binary-ops.cpp +2 -6
- data/ext/sources/ggml/src/ggml-cpu/common.h +8 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-impl.h +3 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +59 -12
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.cpp +15 -0
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +21 -20
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +965 -252
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +584 -197
- data/ext/sources/ggml/src/ggml-cpu/ops.cpp +903 -188
- data/ext/sources/ggml/src/ggml-cpu/ops.h +1 -0
- data/ext/sources/ggml/src/ggml-cpu/quants.c +40 -0
- data/ext/sources/ggml/src/ggml-cpu/quants.h +3 -0
- data/ext/sources/ggml/src/ggml-cpu/repack.cpp +2890 -679
- data/ext/sources/ggml/src/ggml-cpu/repack.h +119 -8
- data/ext/sources/ggml/src/ggml-cpu/simd-gemm.h +136 -0
- data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +111 -3
- data/ext/sources/ggml/src/ggml-cpu/unary-ops.cpp +1 -1
- data/ext/sources/ggml/src/ggml-cpu/vec.cpp +17 -0
- data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +1 -1
- data/ext/sources/ggml/src/ggml-cuda/argsort.cu +19 -10
- data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +32 -30
- data/ext/sources/ggml/src/ggml-cuda/common.cuh +134 -18
- data/ext/sources/ggml/src/ggml-cuda/convert.cu +41 -27
- data/ext/sources/ggml/src/ggml-cuda/cpy.cu +6 -3
- data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +78 -64
- data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +384 -143
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cuh +36 -22
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec.cuh +3 -3
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +26 -5
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +1 -1
- data/ext/sources/ggml/src/ggml-cuda/fattn.cu +127 -12
- data/ext/sources/ggml/src/ggml-cuda/gated_delta_net.cu +263 -0
- data/ext/sources/ggml/src/ggml-cuda/gated_delta_net.cuh +4 -0
- data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +595 -200
- data/ext/sources/ggml/src/ggml-cuda/mean.cu +9 -8
- data/ext/sources/ggml/src/ggml-cuda/mma.cuh +173 -6
- data/ext/sources/ggml/src/ggml-cuda/mmf.cu +30 -10
- data/ext/sources/ggml/src/ggml-cuda/mmf.cuh +158 -85
- data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +34 -22
- data/ext/sources/ggml/src/ggml-cuda/mmvf.cu +127 -67
- data/ext/sources/ggml/src/ggml-cuda/mmvf.cuh +2 -0
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +157 -65
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cuh +1 -0
- data/ext/sources/ggml/src/ggml-cuda/norm.cu +18 -76
- data/ext/sources/ggml/src/ggml-cuda/pad.cu +13 -10
- data/ext/sources/ggml/src/ggml-cuda/quantize.cu +1 -1
- data/ext/sources/ggml/src/ggml-cuda/reduce_rows.cuh +2 -16
- data/ext/sources/ggml/src/ggml-cuda/rope.cu +233 -133
- data/ext/sources/ggml/src/ggml-cuda/softmax.cu +8 -83
- data/ext/sources/ggml/src/ggml-cuda/solve_tri.cu +1 -1
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +56 -32
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cuh +1 -1
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_32.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_32.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +3 -3
- data/ext/sources/ggml/src/ggml-cuda/top-k.cu +0 -1
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cu +199 -135
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cuh +20 -14
- data/ext/sources/ggml/src/ggml-cuda/unary.cu +55 -0
- data/ext/sources/ggml/src/ggml-cuda/unary.cuh +2 -0
- data/ext/sources/ggml/src/ggml-cuda/vecdotq.cuh +31 -17
- data/ext/sources/ggml/src/ggml-cuda/vendors/hip.h +10 -0
- data/ext/sources/ggml/src/ggml-hexagon/CMakeLists.txt +82 -45
- data/ext/sources/ggml/src/ggml-hexagon/ggml-hexagon.cpp +334 -160
- data/ext/sources/ggml/src/ggml-hexagon/htp/CMakeLists.txt +7 -5
- data/ext/sources/ggml/src/ggml-hexagon/htp/act-ops.c +328 -197
- data/ext/sources/ggml/src/ggml-hexagon/htp/argsort-ops.c +281 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/binary-ops.c +765 -234
- data/ext/sources/ggml/src/ggml-hexagon/htp/cpy-ops.c +252 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/flash-attn-ops.c +412 -265
- data/ext/sources/ggml/src/ggml-hexagon/htp/get-rows-ops.c +23 -23
- data/ext/sources/ggml/src/ggml-hexagon/htp/{htp-dma.c → hex-dma.c} +1 -1
- data/ext/sources/ggml/src/ggml-hexagon/htp/{htp-dma.h → hex-dma.h} +28 -3
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-dump.h +77 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-fastdiv.h +37 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-utils.h +51 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-ctx.h +1 -1
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-msg.h +27 -37
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-ops.h +6 -35
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-arith.h +443 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-base.h +240 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-copy.h +245 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-div.h +251 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-dump.h +129 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-exp.h +215 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-floor.h +100 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-inverse.h +210 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-reduce.h +296 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-scale.h +133 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sigmoid.h +141 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sqrt.h +126 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-types.h +36 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-utils.h +20 -1347
- data/ext/sources/ggml/src/ggml-hexagon/htp/main.c +211 -13
- data/ext/sources/ggml/src/ggml-hexagon/htp/matmul-ops.c +1119 -952
- data/ext/sources/ggml/src/ggml-hexagon/htp/rope-ops.c +254 -244
- data/ext/sources/ggml/src/ggml-hexagon/htp/set-rows-ops.c +36 -36
- data/ext/sources/ggml/src/ggml-hexagon/htp/softmax-ops.c +155 -138
- data/ext/sources/ggml/src/ggml-hexagon/htp/ssm-conv.c +339 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/sum-rows-ops.c +128 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/unary-ops.c +209 -114
- data/ext/sources/ggml/src/ggml-hexagon/htp/worker-pool.c +1 -5
- data/ext/sources/ggml/src/ggml-hexagon/htp-drv.cpp +418 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp-drv.h +121 -0
- data/ext/sources/ggml/src/ggml-hexagon/libdl.h +79 -0
- data/ext/sources/ggml/src/ggml-hexagon/libggml-htp.inf +38 -0
- data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +6 -0
- data/ext/sources/ggml/src/ggml-impl.h +62 -0
- data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +10 -10
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-common.cpp +13 -2
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.h +8 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.m +147 -17
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.cpp +274 -73
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.h +22 -4
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.m +102 -36
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +174 -23
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.cpp +580 -280
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.h +5 -4
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.cpp +320 -107
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.metal +1068 -825
- data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +19 -1
- data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +3108 -636
- data/ext/sources/ggml/src/ggml-opencl/kernels/concat.cl +41 -99
- data/ext/sources/ggml/src/ggml-opencl/kernels/cpy.cl +45 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cumsum.cl +139 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +204 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/diag.cl +27 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/exp.cl +125 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/expm1.cl +87 -56
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemm_noshuffle_q4_1_f32.cl +132 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_general_q8_0_f32.cl +195 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_q4_1_f32.cl +283 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/l2_norm.cl +71 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mean.cl +114 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q4_0_f32_l4_lm.cl +163 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q4_1_f32_l4_lm.cl +165 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q6_k_f32_l4_lm.cl +158 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q8_0_f32_8x4.cl +129 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_1_f32.cl +219 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_1_f32_flat.cl +229 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_k_f32.cl +180 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/{mul_mv_q6_k.cl → mul_mv_q6_k_f32.cl} +4 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q6_k_f32_flat.cl +194 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/neg.cl +125 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/repeat.cl +31 -32
- data/ext/sources/ggml/src/ggml-opencl/kernels/scale.cl +14 -4
- data/ext/sources/ggml/src/ggml-opencl/kernels/softplus.cl +88 -60
- data/ext/sources/ggml/src/ggml-opencl/kernels/solve_tri.cl +51 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sum_rows.cl +114 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/tanh.cl +94 -48
- data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +26 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/tri.cl +32 -0
- data/ext/sources/ggml/src/ggml-openvino/.clang-format +154 -0
- data/ext/sources/ggml/src/ggml-openvino/CMakeLists.txt +22 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-decoder.cpp +975 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-decoder.h +294 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino-extra.cpp +373 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino-extra.h +182 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino.cpp +1110 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-quants.cpp +884 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-quants.h +153 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/decoder.h +74 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/frontend.cpp +27 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/frontend.h +23 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/input_model.cpp +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/input_model.h +29 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/node_context.h +112 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/cont.cpp +48 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/cpy.cpp +21 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/flash_attn_ext.cpp +90 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/get_rows.cpp +69 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/glu_geglu.cpp +61 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/glu_swiglu.cpp +62 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/mulmat.cpp +90 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/permute.cpp +102 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/reshape.cpp +83 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/rms_norm.cpp +46 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/rope.cpp +123 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/scale.cpp +41 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/set_rows.cpp +76 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/softmax.cpp +89 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/transpose.cpp +23 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/unary_silu.cpp +27 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/view.cpp +53 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op_table.cpp +46 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op_table.h +39 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/eliminate_zp.cpp +123 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/eliminate_zp.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/fuse_to_sdpa.cpp +60 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/fuse_to_sdpa.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/mark_decompression_convert_constant_folding.h +29 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/squeeze_matmul.cpp +58 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/squeeze_matmul.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/translate_session.cpp +293 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/translate_session.h +28 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/utils.cpp +226 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/utils.h +85 -0
- data/ext/sources/ggml/src/ggml-openvino/utils.cpp +823 -0
- data/ext/sources/ggml/src/ggml-openvino/utils.h +123 -0
- data/ext/sources/ggml/src/ggml-quants.c +96 -5
- data/ext/sources/ggml/src/ggml-quants.h +3 -0
- data/ext/sources/ggml/src/ggml-sycl/CMakeLists.txt +15 -88
- data/ext/sources/ggml/src/ggml-sycl/add-id.cpp +5 -1
- data/ext/sources/ggml/src/ggml-sycl/backend.hpp +1 -0
- data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +21 -20
- data/ext/sources/ggml/src/ggml-sycl/common.hpp +315 -10
- data/ext/sources/ggml/src/ggml-sycl/convert.cpp +69 -1
- data/ext/sources/ggml/src/ggml-sycl/convert.hpp +22 -1
- data/ext/sources/ggml/src/ggml-sycl/count-equal.cpp +1 -1
- data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +791 -47
- data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +78 -68
- data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +2 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-common.hpp +1179 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-tile.cpp +55 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-tile.hpp +1338 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-vec.hpp +667 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn.cpp +225 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn.hpp +22 -0
- data/ext/sources/ggml/src/ggml-sycl/gated_delta_net.cpp +309 -0
- data/ext/sources/ggml/src/ggml-sycl/gated_delta_net.hpp +8 -0
- data/ext/sources/ggml/src/ggml-sycl/ggml-sycl.cpp +316 -51
- data/ext/sources/ggml/src/ggml-sycl/norm.cpp +65 -66
- data/ext/sources/ggml/src/ggml-sycl/outprod.cpp +3 -3
- data/ext/sources/ggml/src/ggml-sycl/presets.hpp +3 -0
- data/ext/sources/ggml/src/ggml-sycl/quants.hpp +1 -1
- data/ext/sources/ggml/src/ggml-sycl/rope.cpp +450 -287
- data/ext/sources/ggml/src/ggml-sycl/rope.hpp +6 -0
- data/ext/sources/ggml/src/ggml-sycl/softmax.cpp +6 -6
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq112-dv112.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq128-dv128.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq256-dv256.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq40-dv40.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq576-dv512.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq64-dv64.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq72-dv72.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq80-dv80.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq96-dv96.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +13 -0
- data/ext/sources/ggml/src/ggml-sycl/wkv.cpp +1 -1
- data/ext/sources/ggml/src/ggml-virtgpu/CMakeLists.txt +70 -0
- data/ext/sources/ggml/src/ggml-virtgpu/apir_cs_ggml-rpc-front.cpp +87 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/CMakeLists.txt +21 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/apir_cs_ggml-rpc-back.cpp +115 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-convert.h +13 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-backend.cpp +102 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-buffer-type.cpp +105 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-buffer.cpp +179 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-device.cpp +148 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.cpp +51 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.gen.h +73 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.h +27 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-virgl-apir.h +32 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend.cpp +144 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/api_remoting.h +95 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_backend.gen.h +94 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_backend.h +50 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs.h +378 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs_ggml.h +232 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs_rpc.h +58 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-buffer-type.cpp +81 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-buffer.cpp +119 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-device.cpp +158 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-reg.cpp +213 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend.cpp +69 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-remoting.h +71 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggmlremoting_functions.yaml +166 -0
- data/ext/sources/ggml/src/ggml-virtgpu/include/apir_hw.h +9 -0
- data/ext/sources/ggml/src/ggml-virtgpu/regenerate_remoting.py +333 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-apir.h +15 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-backend.cpp +58 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-buffer-type.cpp +110 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-buffer.cpp +173 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-device.cpp +192 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-impl.h +36 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward.gen.h +53 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-shm.cpp +98 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-shm.h +23 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-utils.cpp +179 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-utils.h +86 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu.cpp +544 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu.h +117 -0
- data/ext/sources/ggml/src/ggml-vulkan/CMakeLists.txt +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/ggml-vulkan.cpp +1250 -465
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +16 -8
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/elu.comp +27 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +374 -170
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.glsl +66 -22
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +389 -201
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +106 -58
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_mask_opt.comp +162 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +9 -8
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gated_delta_net.comp +128 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +12 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.glsl +20 -17
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +11 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +8 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_id_funcs.glsl +3 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +5 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.glsl +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +2 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_funcs.glsl +36 -63
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_params.glsl +10 -5
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_vision.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sgn.comp +21 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/ssm_conv.comp +16 -10
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +55 -35
- data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu-shader-lib.hpp +1314 -109
- data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu.cpp +1660 -1371
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argmax.wgsl +72 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argsort.wgsl +106 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argsort_merge.wgsl +134 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/binary.wgsl +141 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/common_decls.tmpl +65 -72
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/concat.wgsl +75 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cpy.tmpl.wgsl +6 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cumsum.wgsl +66 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/embed_wgsl.py +40 -5
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/flash_attn.wgsl +105 -60
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{get_rows.tmpl.wgsl → get_rows.wgsl} +53 -259
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{mul_mat.tmpl.wgsl → mul_mat.wgsl} +68 -257
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_decls.tmpl +692 -23
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{mul_mat_reg_tile.tmpl.wgsl → mul_mat_reg_tile.wgsl} +28 -128
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{mul_mat_subgroup_matrix.tmpl.wgsl → mul_mat_subgroup_matrix.wgsl} +31 -137
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_vec.wgsl +480 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/pad.wgsl +86 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/repeat.wgsl +67 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{scale.tmpl.wgsl → scale.wgsl} +9 -36
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/set_rows.wgsl +40 -12
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/sum_rows.wgsl +55 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/unary.wgsl +193 -0
- data/ext/sources/ggml/src/ggml-zdnn/ggml-zdnn.cpp +6 -1
- data/ext/sources/ggml/src/ggml-zendnn/CMakeLists.txt +31 -32
- data/ext/sources/ggml/src/ggml-zendnn/ggml-zendnn.cpp +9 -6
- data/ext/sources/ggml/src/ggml.c +167 -33
- data/ext/sources/ggml/src/gguf.cpp +229 -44
- data/ext/sources/src/whisper.cpp +6 -28
- data/sig/whisper.rbs +43 -2
- data/test/test_context_params.rb +82 -0
- data/test/test_token.rb +11 -0
- data/test/test_vad_context.rb +58 -8
- data/test/test_whisper.rb +20 -0
- data/whispercpp.gemspec +1 -1
- metadata +240 -28
- data/ext/sources/ggml/cmake/BuildTypes.cmake +0 -54
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm-ppc.h +0 -333
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-exp.c +0 -94
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-inverse.c +0 -72
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sigmoid.c +0 -49
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-utils.c +0 -1020
- data/ext/sources/ggml/src/ggml-hexagon/htp/ops-utils.h +0 -149
- data/ext/sources/ggml/src/ggml-hexagon/htp-utils.c +0 -454
- data/ext/sources/ggml/src/ggml-hexagon/htp-utils.h +0 -221
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/bin_op.tmpl.wgsl +0 -188
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/binary_head.tmpl +0 -45
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_vec.tmpl.wgsl +0 -267
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/set_rows.tmpl.wgsl +0 -112
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/unary_op.wgsl +0 -483
|
@@ -1,1020 +0,0 @@
|
|
|
1
|
-
#pragma clang diagnostic ignored "-Wunused-variable"
|
|
2
|
-
#pragma clang diagnostic ignored "-Wunused-function"
|
|
3
|
-
#pragma clang diagnostic ignored "-Wunused-but-set-variable"
|
|
4
|
-
|
|
5
|
-
#ifdef HTP_DEBUG
|
|
6
|
-
# define FARF_HIGH 1
|
|
7
|
-
#endif
|
|
8
|
-
|
|
9
|
-
#include <HAP_farf.h>
|
|
10
|
-
#include <HAP_mem.h>
|
|
11
|
-
#include <HAP_perf.h>
|
|
12
|
-
#include <HAP_ps.h>
|
|
13
|
-
#include <hexagon_protos.h>
|
|
14
|
-
#include <hexagon_types.h>
|
|
15
|
-
#include <math.h>
|
|
16
|
-
#include <string.h>
|
|
17
|
-
|
|
18
|
-
#define GGML_COMMON_DECL_C
|
|
19
|
-
#include "ggml-common.h"
|
|
20
|
-
#include "hvx-utils.h"
|
|
21
|
-
|
|
22
|
-
#define htp_binary_ops_preamble \
|
|
23
|
-
int step_of_4 = num_elems >> 7; \
|
|
24
|
-
int step_of_2 = (num_elems - step_of_4 * VLEN_FP32 * 4) >> 6; \
|
|
25
|
-
int step_of_1 = (num_elems - step_of_4 * VLEN_FP32 * 4 - step_of_2 * VLEN_FP32 * 2) >> 5; \
|
|
26
|
-
int remaining = num_elems - step_of_4 * VLEN_FP32 * 4 - step_of_2 * VLEN_FP32 * 2 - step_of_1 * VLEN_FP32; \
|
|
27
|
-
\
|
|
28
|
-
const uint8_t * restrict src0_curr = src0; \
|
|
29
|
-
const uint8_t * restrict src1_curr = src1; \
|
|
30
|
-
uint8_t * restrict dst_curr = dst;
|
|
31
|
-
|
|
32
|
-
void hvx_mul_f32(const uint8_t * restrict src0,
|
|
33
|
-
const uint8_t * restrict src1,
|
|
34
|
-
uint8_t * restrict dst,
|
|
35
|
-
const int num_elems) {
|
|
36
|
-
int left_over = num_elems & (VLEN_FP32 - 1);
|
|
37
|
-
int num_elems_whole = num_elems - left_over;
|
|
38
|
-
|
|
39
|
-
int unaligned_addr = 0;
|
|
40
|
-
int unaligned_loop = 0;
|
|
41
|
-
if ((0 == htp_is_aligned((void *) src0, VLEN)) || (0 == htp_is_aligned((void *) src1, VLEN)) ||
|
|
42
|
-
(0 == htp_is_aligned((void *) dst, VLEN))) {
|
|
43
|
-
FARF(HIGH, "hvx_mul_f32: unaligned address in hvx op, possibly slower execution\n");
|
|
44
|
-
unaligned_addr = 1;
|
|
45
|
-
}
|
|
46
|
-
|
|
47
|
-
if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
|
|
48
|
-
unaligned_loop = 1;
|
|
49
|
-
FARF(HIGH, "hvx_mul_f32: unaligned loop in hvx op, possibly slower execution\n");
|
|
50
|
-
}
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
bool handled_leftover = false;
|
|
54
|
-
if (0 == unaligned_loop) {
|
|
55
|
-
HVX_Vector * restrict vec_in1 = (HVX_Vector *) src0;
|
|
56
|
-
HVX_Vector * restrict vec_in2 = (HVX_Vector *) src1;
|
|
57
|
-
HVX_Vector * restrict vec_out = (HVX_Vector *) dst;
|
|
58
|
-
|
|
59
|
-
#pragma unroll(4)
|
|
60
|
-
for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
|
|
61
|
-
HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*vec_in1++, *vec_in2++);
|
|
62
|
-
*vec_out++ = Q6_Vsf_equals_Vqf32(v);
|
|
63
|
-
}
|
|
64
|
-
} else {
|
|
65
|
-
int step_of_1 = num_elems_whole >> 5; // divby 32, because 32 float = 128 bytes per HVX vector
|
|
66
|
-
int leftover_size = left_over * sizeof(float);
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
HVX_Vector * restrict vec_in1 = (HVX_Vector *) src0;
|
|
70
|
-
HVX_Vector * restrict vec_in2 = (HVX_Vector *) src1;
|
|
71
|
-
HVX_UVector * restrict vec_out = (HVX_UVector *) dst;
|
|
72
|
-
|
|
73
|
-
HVX_Vector slinep;
|
|
74
|
-
HVX_Vector slinec;
|
|
75
|
-
HVX_Vector sline;
|
|
76
|
-
HVX_Vector sline2p;
|
|
77
|
-
HVX_Vector sline2c;
|
|
78
|
-
HVX_Vector sline2;
|
|
79
|
-
|
|
80
|
-
slinep = *vec_in1++;
|
|
81
|
-
sline2p = *vec_in2++;
|
|
82
|
-
#pragma unroll(4)
|
|
83
|
-
for (int i = step_of_1 - 1; i > 0; i--) {
|
|
84
|
-
slinec = *vec_in1++;
|
|
85
|
-
sline2c = *vec_in2++;
|
|
86
|
-
sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src0);
|
|
87
|
-
sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) src1);
|
|
88
|
-
|
|
89
|
-
*((HVX_UVector *) (vec_out++)) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline, sline2));
|
|
90
|
-
slinep = slinec;
|
|
91
|
-
sline2p = sline2c;
|
|
92
|
-
}
|
|
93
|
-
if (step_of_1 > 1) {
|
|
94
|
-
slinec = htp_is_aligned(vec_in1, VLEN) && left_over == 0 ? slinep : *vec_in1++;
|
|
95
|
-
sline2c = htp_is_aligned(vec_in2, VLEN) && left_over == 0 ? sline2p : *vec_in2++;
|
|
96
|
-
|
|
97
|
-
sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src0);
|
|
98
|
-
sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) src1);
|
|
99
|
-
*((HVX_UVector *) (vec_out++)) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline, sline2));
|
|
100
|
-
slinep = slinec;
|
|
101
|
-
sline2p = sline2c;
|
|
102
|
-
}
|
|
103
|
-
if (left_over > 0) {
|
|
104
|
-
slinec = (is_in_one_chunk(vec_in1, leftover_size, VLEN) ? slinep : *vec_in1++);
|
|
105
|
-
|
|
106
|
-
sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src0);
|
|
107
|
-
sline2c = (is_in_one_chunk(vec_in2, leftover_size, VLEN) ? sline2p : *vec_in2++);
|
|
108
|
-
sline2 = Q6_V_valign_VVR(sline2c, sline2p, (size_t) src1);
|
|
109
|
-
|
|
110
|
-
HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(sline, sline2);
|
|
111
|
-
hvx_vec_store_u(vec_out, leftover_size, Q6_Vsf_equals_Vqf32(out));
|
|
112
|
-
handled_leftover = true;
|
|
113
|
-
}
|
|
114
|
-
}
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
if (left_over > 0 && !handled_leftover) {
|
|
118
|
-
const float * src0f = (const float *) src0 + num_elems_whole;
|
|
119
|
-
const float * src1f = (const float *) src1 + num_elems_whole;
|
|
120
|
-
float * dstf = (float *) dst + num_elems_whole;
|
|
121
|
-
|
|
122
|
-
HVX_Vector in1 = *(HVX_UVector *) src0f;
|
|
123
|
-
HVX_Vector in2 = *(HVX_UVector *) src1f;
|
|
124
|
-
|
|
125
|
-
HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(in1, in2);
|
|
126
|
-
hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out));
|
|
127
|
-
}
|
|
128
|
-
}
|
|
129
|
-
|
|
130
|
-
void hvx_mul_f32_opt(const uint8_t * restrict src0,
|
|
131
|
-
const uint8_t * restrict src1,
|
|
132
|
-
uint8_t * restrict dst,
|
|
133
|
-
const int num_elems) {
|
|
134
|
-
htp_binary_ops_preamble;
|
|
135
|
-
|
|
136
|
-
for (int i = 0; i < step_of_4; i++) {
|
|
137
|
-
HVX_Vector v1a = *(HVX_Vector *) src0_curr;
|
|
138
|
-
|
|
139
|
-
HVX_Vector v1b = *(HVX_Vector *) src1_curr;
|
|
140
|
-
|
|
141
|
-
HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN);
|
|
142
|
-
|
|
143
|
-
HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(v1a, v1b);
|
|
144
|
-
|
|
145
|
-
HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN);
|
|
146
|
-
|
|
147
|
-
HVX_Vector v3a = *(HVX_Vector *) (src0_curr + 2 * VLEN);
|
|
148
|
-
|
|
149
|
-
HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v2a, v2b);
|
|
150
|
-
|
|
151
|
-
*(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1);
|
|
152
|
-
|
|
153
|
-
HVX_Vector v3b = *(HVX_Vector *) (src1_curr + 2 * VLEN);
|
|
154
|
-
|
|
155
|
-
HVX_Vector v4a = *(HVX_Vector *) (src0_curr + 3 * VLEN);
|
|
156
|
-
|
|
157
|
-
src0_curr += 4 * VLEN;
|
|
158
|
-
|
|
159
|
-
HVX_Vector v3 = Q6_Vqf32_vmpy_VsfVsf(v3a, v3b);
|
|
160
|
-
|
|
161
|
-
*(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2);
|
|
162
|
-
|
|
163
|
-
HVX_Vector v4b = *(HVX_Vector *) (src1_curr + 3 * VLEN);
|
|
164
|
-
|
|
165
|
-
*(HVX_Vector *) (dst_curr + 2 * VLEN) = Q6_Vsf_equals_Vqf32(v3);
|
|
166
|
-
|
|
167
|
-
HVX_Vector v4 = Q6_Vqf32_vmpy_VsfVsf(v4a, v4b);
|
|
168
|
-
|
|
169
|
-
src1_curr += 4 * VLEN;
|
|
170
|
-
|
|
171
|
-
*(HVX_Vector *) (dst_curr + 3 * VLEN) = Q6_Vsf_equals_Vqf32(v4);
|
|
172
|
-
|
|
173
|
-
dst_curr += 4 * VLEN;
|
|
174
|
-
}
|
|
175
|
-
|
|
176
|
-
for (int i = 0; i < step_of_2; i++) {
|
|
177
|
-
HVX_Vector v1a = *(HVX_Vector *) src0_curr;
|
|
178
|
-
|
|
179
|
-
HVX_Vector v1b = *(HVX_Vector *) src1_curr;
|
|
180
|
-
|
|
181
|
-
HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN);
|
|
182
|
-
|
|
183
|
-
HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(v1a, v1b);
|
|
184
|
-
|
|
185
|
-
HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN);
|
|
186
|
-
|
|
187
|
-
*(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1);
|
|
188
|
-
|
|
189
|
-
src0_curr += 2 * VLEN;
|
|
190
|
-
|
|
191
|
-
HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(v2a, v2b);
|
|
192
|
-
|
|
193
|
-
src1_curr += 2 * VLEN;
|
|
194
|
-
|
|
195
|
-
*(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2);
|
|
196
|
-
|
|
197
|
-
dst_curr += 2 * VLEN;
|
|
198
|
-
}
|
|
199
|
-
|
|
200
|
-
for (int i = 0; i < step_of_1; i++) {
|
|
201
|
-
HVX_Vector va = *(HVX_Vector *) src0_curr;
|
|
202
|
-
|
|
203
|
-
src0_curr += VLEN;
|
|
204
|
-
|
|
205
|
-
HVX_Vector vb = *(HVX_Vector *) src1_curr;
|
|
206
|
-
|
|
207
|
-
src1_curr += VLEN;
|
|
208
|
-
|
|
209
|
-
HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(va, vb);
|
|
210
|
-
|
|
211
|
-
*(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v);
|
|
212
|
-
|
|
213
|
-
dst_curr += VLEN;
|
|
214
|
-
}
|
|
215
|
-
|
|
216
|
-
if (remaining > 0) {
|
|
217
|
-
HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr);
|
|
218
|
-
hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v));
|
|
219
|
-
}
|
|
220
|
-
}
|
|
221
|
-
|
|
222
|
-
void hvx_mul_mul_f32_opt(const uint8_t * restrict src0,
|
|
223
|
-
const uint8_t * restrict src1,
|
|
224
|
-
const uint8_t * restrict src2,
|
|
225
|
-
uint8_t * restrict dst,
|
|
226
|
-
const int num_elems) {
|
|
227
|
-
const uint8_t * restrict src0_curr = src0;
|
|
228
|
-
const uint8_t * restrict src1_curr = src1;
|
|
229
|
-
const uint8_t * restrict src2_curr = src2;
|
|
230
|
-
uint8_t * restrict dst_curr = dst;
|
|
231
|
-
|
|
232
|
-
int step_of_2 = num_elems >> 6;
|
|
233
|
-
int step_of_1 = (num_elems - step_of_2 * VLEN_FP32 * 2) >> 5;
|
|
234
|
-
int remaining = num_elems - step_of_2 * VLEN_FP32 * 2 - step_of_1 * VLEN_FP32;
|
|
235
|
-
|
|
236
|
-
for (int i = 0; i < step_of_2; i++) {
|
|
237
|
-
HVX_Vector v1a = *(HVX_Vector *) src0_curr;
|
|
238
|
-
HVX_Vector v1b = *(HVX_Vector *) src1_curr;
|
|
239
|
-
HVX_Vector v1c = *(HVX_Vector *) src2_curr;
|
|
240
|
-
|
|
241
|
-
HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN);
|
|
242
|
-
|
|
243
|
-
HVX_Vector v1_ = Q6_Vqf32_vmpy_VsfVsf(v1a, v1b);
|
|
244
|
-
HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v1_), v1c);
|
|
245
|
-
|
|
246
|
-
HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN);
|
|
247
|
-
|
|
248
|
-
*(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1);
|
|
249
|
-
|
|
250
|
-
HVX_Vector v2c = *(HVX_Vector *) (src2_curr + VLEN);
|
|
251
|
-
|
|
252
|
-
src0_curr += 2 * VLEN;
|
|
253
|
-
|
|
254
|
-
HVX_Vector v2_ = Q6_Vqf32_vmpy_VsfVsf(v2a, v2b);
|
|
255
|
-
HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v2_), v2c);
|
|
256
|
-
|
|
257
|
-
src1_curr += 2 * VLEN;
|
|
258
|
-
src2_curr += 2 * VLEN;
|
|
259
|
-
|
|
260
|
-
*(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2);
|
|
261
|
-
|
|
262
|
-
dst_curr += 2 * VLEN;
|
|
263
|
-
}
|
|
264
|
-
for (int i = 0; i < step_of_1; i++) {
|
|
265
|
-
HVX_Vector va = *(HVX_Vector *) src0_curr;
|
|
266
|
-
src0_curr += VLEN;
|
|
267
|
-
|
|
268
|
-
HVX_Vector vb = *(HVX_Vector *) src1_curr;
|
|
269
|
-
src1_curr += VLEN;
|
|
270
|
-
|
|
271
|
-
HVX_Vector vc = *(HVX_Vector *) src2_curr;
|
|
272
|
-
src2_curr += VLEN;
|
|
273
|
-
|
|
274
|
-
HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(va, vb);
|
|
275
|
-
HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v1), vc);
|
|
276
|
-
|
|
277
|
-
*(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v2);
|
|
278
|
-
dst_curr += VLEN;
|
|
279
|
-
}
|
|
280
|
-
if (remaining > 0) {
|
|
281
|
-
HVX_Vector v1 = Q6_Vqf32_vmpy_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr);
|
|
282
|
-
HVX_Vector v2 = Q6_Vqf32_vmpy_VsfVsf(Q6_Vsf_equals_Vqf32(v1), *(HVX_Vector *) src2_curr);
|
|
283
|
-
hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v2));
|
|
284
|
-
}
|
|
285
|
-
}
|
|
286
|
-
|
|
287
|
-
void hvx_add_f32(const uint8_t * restrict src0,
|
|
288
|
-
const uint8_t * restrict src1,
|
|
289
|
-
uint8_t * restrict dst,
|
|
290
|
-
const int num_elems) {
|
|
291
|
-
int left_over = num_elems & (VLEN_FP32 - 1);
|
|
292
|
-
int num_elems_whole = num_elems - left_over;
|
|
293
|
-
|
|
294
|
-
int unaligned_addr = 0;
|
|
295
|
-
int unaligned_loop = 0;
|
|
296
|
-
if ((0 == htp_is_aligned((void *) src0, VLEN)) || (0 == htp_is_aligned((void *) src1, VLEN)) ||
|
|
297
|
-
(0 == htp_is_aligned((void *) dst, VLEN))) {
|
|
298
|
-
FARF(HIGH, "hvx_add_f32: unaligned address in hvx op, possibly slower execution\n");
|
|
299
|
-
unaligned_addr = 1;
|
|
300
|
-
}
|
|
301
|
-
|
|
302
|
-
if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
|
|
303
|
-
unaligned_loop = 1;
|
|
304
|
-
FARF(HIGH, "hvx_add_f32: unaligned loop in hvx op, possibly slower execution\n");
|
|
305
|
-
}
|
|
306
|
-
|
|
307
|
-
if (0 == unaligned_loop) {
|
|
308
|
-
HVX_Vector * restrict vec_in1 = (HVX_Vector *) src0;
|
|
309
|
-
HVX_Vector * restrict vec_in2 = (HVX_Vector *) src1;
|
|
310
|
-
HVX_Vector * restrict vec_out = (HVX_Vector *) dst;
|
|
311
|
-
|
|
312
|
-
#pragma unroll(4)
|
|
313
|
-
for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
|
|
314
|
-
HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(*vec_in1++, *vec_in2++);
|
|
315
|
-
*vec_out++ = Q6_Vsf_equals_Vqf32(v);
|
|
316
|
-
}
|
|
317
|
-
} else {
|
|
318
|
-
#pragma unroll(4)
|
|
319
|
-
for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
|
|
320
|
-
HVX_Vector in1 = *(HVX_UVector *) (src0 + i * SIZEOF_FP32);
|
|
321
|
-
HVX_Vector in2 = *(HVX_UVector *) (src1 + i * SIZEOF_FP32);
|
|
322
|
-
|
|
323
|
-
HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in1, in2);
|
|
324
|
-
|
|
325
|
-
*(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out);
|
|
326
|
-
}
|
|
327
|
-
}
|
|
328
|
-
|
|
329
|
-
if (left_over > 0) {
|
|
330
|
-
const float * src0f = (const float *) src0 + num_elems_whole;
|
|
331
|
-
const float * src1f = (const float *) src1 + num_elems_whole;
|
|
332
|
-
float * dstf = (float *) dst + num_elems_whole;
|
|
333
|
-
|
|
334
|
-
HVX_Vector in1 = *(HVX_UVector *) src0f;
|
|
335
|
-
HVX_Vector in2 = *(HVX_UVector *) src1f;
|
|
336
|
-
|
|
337
|
-
HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in1, in2);
|
|
338
|
-
hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out));
|
|
339
|
-
}
|
|
340
|
-
}

void hvx_add_f32_opt(const uint8_t * restrict src0,
                     const uint8_t * restrict src1,
                     uint8_t * restrict dst,
                     const int num_elems) {
    htp_binary_ops_preamble;

    for (int i = 0; i < step_of_4; i++) {
        HVX_Vector v1a = *(HVX_Vector *) src0_curr;

        HVX_Vector v1b = *(HVX_Vector *) src1_curr;

        HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN);

        HVX_Vector v1 = Q6_Vqf32_vadd_VsfVsf(v1a, v1b);

        HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN);

        HVX_Vector v3a = *(HVX_Vector *) (src0_curr + 2 * VLEN);

        HVX_Vector v2 = Q6_Vqf32_vadd_VsfVsf(v2a, v2b);

        *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1);

        HVX_Vector v3b = *(HVX_Vector *) (src1_curr + 2 * VLEN);

        HVX_Vector v4a = *(HVX_Vector *) (src0_curr + 3 * VLEN);

        src0_curr += 4 * VLEN;

        HVX_Vector v3 = Q6_Vqf32_vadd_VsfVsf(v3a, v3b);

        *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2);

        HVX_Vector v4b = *(HVX_Vector *) (src1_curr + 3 * VLEN);

        *(HVX_Vector *) (dst_curr + 2 * VLEN) = Q6_Vsf_equals_Vqf32(v3);

        HVX_Vector v4 = Q6_Vqf32_vadd_VsfVsf(v4a, v4b);

        src1_curr += 4 * VLEN;

        *(HVX_Vector *) (dst_curr + 3 * VLEN) = Q6_Vsf_equals_Vqf32(v4);

        dst_curr += 4 * VLEN;
    }
    for (int i = 0; i < step_of_2; i++) {
        HVX_Vector v1a = *(HVX_Vector *) src0_curr;

        HVX_Vector v1b = *(HVX_Vector *) src1_curr;

        HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN);

        HVX_Vector v1 = Q6_Vqf32_vadd_VsfVsf(v1a, v1b);

        HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN);

        *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1);

        src0_curr += 2 * VLEN;

        HVX_Vector v2 = Q6_Vqf32_vadd_VsfVsf(v2a, v2b);

        src1_curr += 2 * VLEN;

        *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2);

        dst_curr += 2 * VLEN;
    }
    for (int i = 0; i < step_of_1; i++) {
        HVX_Vector va = *(HVX_Vector *) src0_curr;

        src0_curr += VLEN;

        HVX_Vector vb = *(HVX_Vector *) src1_curr;

        src1_curr += VLEN;

        HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(va, vb);

        *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v);

        dst_curr += VLEN;
    }
    if (remaining > 0) {
        HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr);
        hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v));
    }
}
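
/* The _opt variant assumes htp_binary_ops_preamble has set up src0_curr /
   src1_curr / dst_curr and the step_of_4 / step_of_2 / step_of_1 / remaining
   counts (its definition is outside this hunk). Loads, qf32 adds, and stores
   are hand-interleaved so independent operations can be packed into the same
   VLIW packets instead of serializing on a single result. */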

void hvx_add_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) {
    size_t left_over       = num_elems & (VLEN_FP32 - 1);
    size_t num_elems_whole = num_elems - left_over;

    int unaligned_addr = 0;
    int unaligned_loop = 0;
    if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) {
        FARF(HIGH, "hvx_add_scalar_f32: unaligned address in hvx op, possibly slower execution\n");
        unaligned_addr = 1;
    }

    if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
        unaligned_loop = 1;
        FARF(HIGH, "hvx_add_scalar_f32: unaligned loop in hvx op, possibly slower execution\n");
    }

    static const float kInf = INFINITY;
    const HVX_Vector inf     = hvx_vec_splat_fp32(kInf);
    HVX_Vector       val_vec = hvx_vec_splat_fp32(val);

    if (0 == unaligned_loop) {
        HVX_Vector * restrict vec_in1 = (HVX_Vector *) src;
        HVX_Vector * restrict vec_out = (HVX_Vector *) dst;

#pragma unroll(4)
        for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
            HVX_Vector in = *vec_in1++;
            const HVX_VectorPred pred_inf = Q6_Q_vcmp_eq_VwVw(inf, in);
            HVX_Vector v = Q6_Vqf32_vadd_VsfVsf(in, val_vec);
            v = Q6_Vsf_equals_Vqf32(v);
            v = Q6_V_vmux_QVV(pred_inf, inf, v);
            *vec_out++ = v;
        }
    } else {
#pragma unroll(4)
        for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
            HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32);

            const HVX_VectorPred pred_inf = Q6_Q_vcmp_eq_VwVw(inf, in);
            HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in, val_vec);
            out = Q6_Vsf_equals_Vqf32(out);
            out = Q6_V_vmux_QVV(pred_inf, inf, out);

            *(HVX_UVector *) (dst + i * SIZEOF_FP32) = out;
        }
    }

    if (left_over > 0) {
        const float * srcf = (const float *) src + num_elems_whole;
        float *       dstf = (float *) dst + num_elems_whole;

        HVX_Vector in = *(HVX_UVector *) srcf;

        const HVX_VectorPred pred_inf = Q6_Q_vcmp_eq_VwVw(inf, in);
        HVX_Vector out = Q6_Vqf32_vadd_VsfVsf(in, val_vec);
        out = Q6_Vsf_equals_Vqf32(out);
        out = Q6_V_vmux_QVV(pred_inf, inf, out);

        hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, out);
    }
}
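
/* The pred_inf mux keeps lanes that are bit-exactly +INF pinned at +INF:
   Q6_Q_vcmp_eq_VwVw compares the raw 32-bit words against the splatted INF
   pattern and Q6_V_vmux_QVV restores INF in those lanes, presumably because
   the qf32 add/convert round trip is not guaranteed to preserve infinities. */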

void hvx_mul_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) {
    size_t left_over       = num_elems & (VLEN_FP32 - 1);
    size_t num_elems_whole = num_elems - left_over;

    int unaligned_addr = 0;
    int unaligned_loop = 0;
    if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) {
        FARF(HIGH, "hvx_mul_scalar_f32: unaligned address in hvx op, possibly slower execution\n");
        unaligned_addr = 1;
    }

    if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
        unaligned_loop = 1;
        FARF(HIGH, "hvx_mul_scalar_f32: unaligned loop in hvx op, possibly slower execution\n");
    }

    HVX_Vector val_vec = hvx_vec_splat_fp32(val);
    bool handled_leftover = false;
    if (0 == unaligned_loop) {
        HVX_Vector * restrict vec_in1 = (HVX_Vector *) src;
        HVX_Vector * restrict vec_out = (HVX_Vector *) dst;

#pragma unroll(4)
        for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
            HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*vec_in1++, val_vec);
            *vec_out++ = Q6_Vsf_equals_Vqf32(v);
        }
    } else {
        int step_of_1     = num_elems >> 5; // divide by 32: 32 floats = 128 bytes = one HVX vector
        int leftover_size = left_over * sizeof(float);

        HVX_Vector *  input_v_ptr  = (HVX_Vector *) src;
        HVX_UVector * output_v_ptr = (HVX_UVector *) dst;

        HVX_Vector slinep;
        HVX_Vector slinec;
        HVX_Vector sline;

        slinep = *input_v_ptr++;

#pragma unroll(4)
        for (int i = step_of_1 - 1; i > 0; i--) {
            slinec = *input_v_ptr++;
            sline  = Q6_V_valign_VVR(slinec, slinep, (size_t) src);
            *((HVX_UVector *) (output_v_ptr++)) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline, val_vec));
            /* Prepare slinep for next iteration */
            slinep = slinec;
        }

        if (step_of_1 > 0) {
            slinec = htp_is_aligned(input_v_ptr, VLEN) && left_over == 0 ? slinep : *input_v_ptr++;
            sline  = Q6_V_valign_VVR(slinec, slinep, (size_t) src);
            *((HVX_UVector *) (output_v_ptr++)) = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline, val_vec));

            slinep = slinec;
        }

        if (leftover_size > 0) {
            slinec = (is_in_one_chunk(input_v_ptr, leftover_size, VLEN) ? slinep : *input_v_ptr++);

            sline = Q6_V_valign_VVR(slinec, slinep, (size_t) src);

            HVX_Vector sout = Q6_Vsf_equals_Vqf32(Q6_Vqf32_vmpy_VsfVsf(sline, val_vec));
            hvx_vec_store_u(output_v_ptr, leftover_size, sout);
            handled_leftover = true;
        }
    }

    if (left_over > 0 && !handled_leftover) {
        const float * srcf = (const float *) src + num_elems_whole;
        float *       dstf = (float *) dst + num_elems_whole;

        HVX_Vector in = *(HVX_UVector *) srcf;

        HVX_Vector out = Q6_Vqf32_vmpy_VsfVsf(in, val_vec);
        hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out));
    }
}
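
/* Unlike the other scalar ops, the unaligned path here re-aligns the input on
   the fly: it streams whole aligned vectors and uses Q6_V_valign_VVR with the
   low bits of src to splice each logical vector out of the previous/current
   pair (slinep/slinec), so the multiply loop itself only issues aligned loads
   while the stores go through HVX_UVector. */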

void hvx_sub_f32(const uint8_t * restrict src0,
                 const uint8_t * restrict src1,
                 uint8_t * restrict dst,
                 const int num_elems) {
    size_t left_over       = num_elems & (VLEN_FP32 - 1);
    size_t num_elems_whole = num_elems - left_over;

    int unaligned_addr = 0;
    int unaligned_loop = 0;
    if ((0 == htp_is_aligned((void *) src0, VLEN)) || (0 == htp_is_aligned((void *) src1, VLEN)) ||
        (0 == htp_is_aligned((void *) dst, VLEN))) {
        FARF(HIGH, "hvx_sub_f32: unaligned address in hvx op, possibly slower execution\n");
        unaligned_addr = 1;
    }

    if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
        unaligned_loop = 1;
        FARF(HIGH, "hvx_sub_f32: unaligned loop in hvx op, possibly slower execution\n");
    }

    if (0 == unaligned_loop) {
        HVX_Vector * restrict vec_in1 = (HVX_Vector *) src0;
        HVX_Vector * restrict vec_in2 = (HVX_Vector *) src1;
        HVX_Vector * restrict vec_out = (HVX_Vector *) dst;

#pragma unroll(4)
        for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
            HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(*vec_in1++, *vec_in2++);
            *vec_out++ = Q6_Vsf_equals_Vqf32(v);
        }
    } else {
#pragma unroll(4)
        for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
            HVX_Vector in1 = *(HVX_UVector *) (src0 + i * SIZEOF_FP32);
            HVX_Vector in2 = *(HVX_UVector *) (src1 + i * SIZEOF_FP32);

            HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in1, in2);

            *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out);
        }
    }

    if (left_over > 0) {
        const float * src0f = (const float *) src0 + num_elems_whole;
        const float * src1f = (const float *) src1 + num_elems_whole;
        float *       dstf  = (float *) dst + num_elems_whole;

        HVX_Vector in1 = *(HVX_UVector *) src0f;
        HVX_Vector in2 = *(HVX_UVector *) src1f;

        HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in1, in2);
        hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out));
    }
}
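
/* Q6_Vqf32_vsub_VsfVsf (like the add/mpy variants above) produces its result
   in qf32, Hexagon's wider internal float format, so every store is preceded
   by Q6_Vsf_equals_Vqf32 to convert back to standard IEEE sf lanes. */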

void hvx_sub_f32_opt(const uint8_t * restrict src0,
                     const uint8_t * restrict src1,
                     uint8_t * restrict dst,
                     const int num_elems) {
    htp_binary_ops_preamble;

    for (int i = 0; i < step_of_4; i++) {
        HVX_Vector v1a = *(HVX_Vector *) src0_curr;

        HVX_Vector v1b = *(HVX_Vector *) src1_curr;

        HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN);

        HVX_Vector v1 = Q6_Vqf32_vsub_VsfVsf(v1a, v1b);

        HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN);

        HVX_Vector v3a = *(HVX_Vector *) (src0_curr + 2 * VLEN);

        HVX_Vector v2 = Q6_Vqf32_vsub_VsfVsf(v2a, v2b);

        *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1);

        HVX_Vector v3b = *(HVX_Vector *) (src1_curr + 2 * VLEN);

        HVX_Vector v4a = *(HVX_Vector *) (src0_curr + 3 * VLEN);

        src0_curr += 4 * VLEN;

        HVX_Vector v3 = Q6_Vqf32_vsub_VsfVsf(v3a, v3b);

        *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2);

        HVX_Vector v4b = *(HVX_Vector *) (src1_curr + 3 * VLEN);

        *(HVX_Vector *) (dst_curr + 2 * VLEN) = Q6_Vsf_equals_Vqf32(v3);

        HVX_Vector v4 = Q6_Vqf32_vsub_VsfVsf(v4a, v4b);

        src1_curr += 4 * VLEN;

        *(HVX_Vector *) (dst_curr + 3 * VLEN) = Q6_Vsf_equals_Vqf32(v4);

        dst_curr += 4 * VLEN;
    }
    for (int i = 0; i < step_of_2; i++) {
        HVX_Vector v1a = *(HVX_Vector *) src0_curr;

        HVX_Vector v1b = *(HVX_Vector *) src1_curr;

        HVX_Vector v2a = *(HVX_Vector *) (src0_curr + VLEN);

        HVX_Vector v1 = Q6_Vqf32_vsub_VsfVsf(v1a, v1b);

        HVX_Vector v2b = *(HVX_Vector *) (src1_curr + VLEN);

        *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v1);

        src0_curr += 2 * VLEN;

        HVX_Vector v2 = Q6_Vqf32_vsub_VsfVsf(v2a, v2b);

        src1_curr += 2 * VLEN;

        *(HVX_Vector *) (dst_curr + VLEN) = Q6_Vsf_equals_Vqf32(v2);

        dst_curr += 2 * VLEN;
    }
    for (int i = 0; i < step_of_1; i++) {
        HVX_Vector va = *(HVX_Vector *) src0_curr;

        src0_curr += VLEN;

        HVX_Vector vb = *(HVX_Vector *) src1_curr;

        src1_curr += VLEN;

        HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(va, vb);

        *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v);

        dst_curr += VLEN;
    }
    if (remaining > 0) {
        HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(*(HVX_Vector *) src0_curr, *(HVX_Vector *) src1_curr);
        hvx_vec_store_u((void *) dst_curr, remaining * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(v));
    }
}
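
/* The remaining-tail path loads a full aligned vector even though fewer than
   VLEN_FP32 elements are valid; the masked hvx_vec_store_u then writes only
   remaining * SIZEOF_FP32 bytes. This presumably relies on an aligned VLEN
   load never crossing a page boundary, so the over-read cannot fault. */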

void hvx_sub_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) {
    size_t left_over       = num_elems & (VLEN_FP32 - 1);
    size_t num_elems_whole = num_elems - left_over;

    int unaligned_addr = 0;
    int unaligned_loop = 0;
    if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) {
        FARF(HIGH, "hvx_sub_scalar_f32: unaligned address in hvx op, possibly slower execution\n");
        unaligned_addr = 1;
    }

    if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
        unaligned_loop = 1;
        FARF(HIGH, "hvx_sub_scalar_f32: unaligned loop in hvx op, possibly slower execution\n");
    }

    HVX_Vector val_vec = hvx_vec_splat_fp32(val);

    if (0 == unaligned_loop) {
        HVX_Vector * restrict vec_in1 = (HVX_Vector *) src;
        HVX_Vector * restrict vec_out = (HVX_Vector *) dst;

#pragma unroll(4)
        for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
            HVX_Vector v = Q6_Vqf32_vsub_VsfVsf(*vec_in1++, val_vec);
            *vec_out++ = Q6_Vsf_equals_Vqf32(v);
        }
    } else {
#pragma unroll(4)
        for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
            HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32);

            HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in, val_vec);

            *(HVX_UVector *) (dst + i * SIZEOF_FP32) = Q6_Vsf_equals_Vqf32(out);
        }
    }

    if (left_over > 0) {
        const float * srcf = (const float *) src + num_elems_whole;
        float *       dstf = (float *) dst + num_elems_whole;

        HVX_Vector in = *(HVX_UVector *) srcf;

        HVX_Vector out = Q6_Vqf32_vsub_VsfVsf(in, val_vec);
        hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, Q6_Vsf_equals_Vqf32(out));
    }
}
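
/* val is splatted once into val_vec outside the loops, so the hot loop is a
   single vsub + convert per vector. Note there is no +INF special case here,
   unlike hvx_add_scalar_f32. */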

float hvx_sum_of_squares_f32(const uint8_t * restrict src, const int num_elems) {
    int left_over       = num_elems & (VLEN_FP32 - 1);
    int num_elems_whole = num_elems - left_over;

    if (0 == htp_is_aligned((void *) src, VLEN)) {
        FARF(HIGH, "hvx_sum_of_squares_f32: unaligned address in hvx op, possibly slower execution\n");
    }

    assert((1 == htp_is_aligned((void *) src, VLEN)) || (0 == num_elems_whole));

    HVX_Vector * restrict vec_in1 = (HVX_Vector *) src;

    HVX_Vector sum_vec_acc = Q6_V_vsplat_R(0x00000000);
    HVX_Vector zero_vec    = Q6_V_vsplat_R(0x00000000);

#pragma unroll(4)
    for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
        HVX_Vector v = Q6_Vqf32_vmpy_VsfVsf(*vec_in1, *vec_in1);
        sum_vec_acc  = Q6_Vqf32_vadd_Vqf32Vqf32(sum_vec_acc, v);
        vec_in1++;
    }

    if (left_over > 0) {
        const float * srcf = (const float *) src + num_elems_whole;

        HVX_Vector vec_left = *(HVX_UVector *) srcf;

        HVX_Vector vec_left_sq = Q6_Vqf32_vmpy_VsfVsf(vec_left, vec_left);
        HVX_Vector vec_tmp     = Q6_V_valign_VVR(vec_left_sq, zero_vec, left_over * SIZEOF_FP32);

        sum_vec_acc = Q6_Vqf32_vadd_Vqf32Vqf32(sum_vec_acc, vec_tmp);
    }

    HVX_Vector v = hvx_vec_qf32_reduce_sum(sum_vec_acc);
    return hvx_vec_get_fp32(Q6_Vsf_equals_Vqf32(v));
}
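
/* The reduction accumulates squares in qf32 across all 32 lanes; the tail is
   masked by Q6_V_valign_VVR against zero_vec so lanes past left_over add
   nothing, and hvx_vec_qf32_reduce_sum folds the lanes into a scalar. The
   assert above restricts the whole-vector loop to VLEN-aligned inputs. */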

float hvx_self_sum_f32(const uint8_t * restrict src, const int num_elems) {
    int left_over       = num_elems & (VLEN_FP32 - 1);
    int num_elems_whole = num_elems - left_over;

    int unaligned_addr = 0;
    int unaligned_loop = 0;
    if (0 == htp_is_aligned((void *) src, VLEN)) {
        FARF(HIGH, "hvx_self_sum_f32: unaligned address in hvx op, possibly slower execution\n");
        unaligned_addr = 1;
    }

    if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
        unaligned_loop = 1;
        FARF(HIGH, "hvx_self_sum_f32: unaligned loop in hvx op, possibly slower execution\n");
    }

    HVX_Vector sum_vec  = Q6_V_vsplat_R(0x00000000);
    HVX_Vector zero_vec = Q6_V_vsplat_R(0x00000000);

    if (0 == unaligned_loop) {
        HVX_Vector * vec_in = (HVX_Vector *) src;

#pragma unroll(4)
        for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
            // sum_vec = Q6_Vqf32_vadd_Vqf32Vsf(sum_vec, *vec_in++);
            sum_vec = Q6_Vqf32_vadd_VsfVsf(Q6_Vsf_equals_Vqf32(sum_vec), *vec_in++);
        }
    } else {
#pragma unroll(4)
        for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
            HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32);

            sum_vec = Q6_Vqf32_vadd_VsfVsf(Q6_Vsf_equals_Vqf32(sum_vec), in);
        }
    }

    if (left_over > 0) {
        const float * srcf = (const float *) src + num_elems_whole;

        HVX_Vector vec_left = *(HVX_UVector *) srcf;
        HVX_Vector vec_tmp  = Q6_V_valign_VVR(vec_left, zero_vec, left_over * SIZEOF_FP32);
        // sum_vec = Q6_Vqf32_vadd_Vqf32Vsf(sum_vec, vec_tmp);
        sum_vec = Q6_Vqf32_vadd_VsfVsf(Q6_Vsf_equals_Vqf32(sum_vec), vec_tmp);
    }

    HVX_Vector v = hvx_vec_qf32_reduce_sum(sum_vec);
    return hvx_vec_get_fp32(Q6_Vsf_equals_Vqf32(v));
}
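
/* The commented-out Q6_Vqf32_vadd_Vqf32Vsf form would keep the accumulator in
   qf32 across iterations; the active code instead converts sum_vec back to sf
   before every add, paying an extra convert per vector. */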

float hvx_self_max_f32(const uint8_t * restrict src, const int num_elems) {
    int left_over       = num_elems & (VLEN_FP32 - 1);
    int num_elems_whole = num_elems - left_over;

    int unaligned_addr = 0;
    int unaligned_loop = 0;
    if (0 == htp_is_aligned((void *) src, VLEN)) {
        FARF(HIGH, "hvx_self_max_f32: unaligned address in hvx op, possibly slower execution\n");
        unaligned_addr = 1;
    }

    if ((1 == unaligned_addr) && (num_elems_whole != 0)) {
        unaligned_loop = 1;
        FARF(HIGH, "hvx_self_max_f32: unaligned loop in hvx op, possibly slower execution\n");
    }

    HVX_Vector vec_max   = hvx_vec_splat_fp32(((const float *) src)[0]);
    HVX_Vector vec_first = hvx_vec_splat_fp32(((const float *) src)[0]);

    if (0 == unaligned_loop) {
        HVX_Vector * restrict vec_in = (HVX_Vector *) src;

#pragma unroll(4)
        for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
            vec_max = Q6_Vsf_vmax_VsfVsf(vec_max, *vec_in++);
        }
    } else {
#pragma unroll(4)
        for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
            HVX_Vector in = *(HVX_UVector *) (src + i * SIZEOF_FP32);

            vec_max = Q6_Vsf_vmax_VsfVsf(vec_max, in);
        }
    }

    if (left_over > 0) {
        const float * srcf = (const float *) src + num_elems_whole;

        HVX_Vector in = *(HVX_UVector *) srcf;

        HVX_Vector temp = Q6_V_valign_VVR(in, vec_first, left_over * SIZEOF_FP32);
        vec_max = Q6_Vsf_vmax_VsfVsf(vec_max, temp);
    }

    HVX_Vector v = hvx_vec_reduce_max_fp32(vec_max);
    return hvx_vec_get_fp32(v);
}
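
/* For the tail, Q6_V_valign_VVR fills the out-of-range lanes from vec_first
   (a splat of element 0), which is already reflected in vec_max and so cannot
   change the result; hvx_vec_reduce_max_fp32 then folds the 32 lanes into a
   single value. */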

void hvx_min_scalar_f32(const uint8_t * restrict src, const float val, uint8_t * restrict dst, const int num_elems) {
    size_t left_over       = num_elems & (VLEN_FP32 - 1);
    size_t num_elems_whole = num_elems - left_over;
    int    unalign_address = 0;
    if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) {
        FARF(HIGH, "hvx_min_scalar_f32: unaligned address in hvx op, possibly slower execution\n");
        unalign_address = 1;
    }

    HVX_Vector vec_min = hvx_vec_splat_fp32(val);

    if (unalign_address == 0) {
        HVX_Vector * restrict vec_in  = (HVX_Vector *) src;
        HVX_Vector * restrict vec_out = (HVX_Vector *) dst;

#pragma unroll(4)
        for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
            HVX_Vector min_clamp = Q6_Vsf_vmin_VsfVsf(vec_min, *vec_in++);
            *vec_out++ = min_clamp;
        }
    } else {
        HVX_UVector * restrict vec_in  = (HVX_UVector *) src;
        HVX_UVector * restrict vec_out = (HVX_UVector *) dst;

#pragma unroll(4)
        for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
            HVX_Vector min_clamp = Q6_Vsf_vmin_VsfVsf(vec_min, *vec_in++);
            *vec_out++ = min_clamp;
        }
    }

    if (left_over > 0) {
        const float * srcf = (const float *) src + num_elems_whole;
        float *       dstf = (float *) dst + num_elems_whole;

        HVX_Vector in = *(HVX_UVector *) srcf;

        HVX_Vector min_clamp = Q6_Vsf_vmin_VsfVsf(vec_min, in);

        hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, min_clamp);
    }
}
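
/* hvx_min_scalar_f32 is an elementwise min(x, val), i.e. a clamp from above.
   Q6_Vsf_vmin_VsfVsf operates directly on sf lanes, so no qf32 round trip is
   needed and the result is stored as-is. */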

void hvx_clamp_scalar_f32(const uint8_t * restrict src,
                          const float limit_left,
                          const float limit_right,
                          uint8_t * restrict dst,
                          const int num_elems) {
    size_t left_over       = num_elems & (VLEN_FP32 - 1);
    size_t num_elems_whole = num_elems - left_over;

    int unalign_address = 0;
    if ((0 == htp_is_aligned((void *) src, VLEN)) || (0 == htp_is_aligned((void *) dst, VLEN))) {
        FARF(HIGH, "hvx_clamp_scalar_f32: unaligned address in hvx op, possibly slower execution\n");
        unalign_address = 1;
    }

    HVX_Vector range_left  = hvx_vec_splat_fp32(limit_left);
    HVX_Vector range_right = hvx_vec_splat_fp32(limit_right);

    if (unalign_address == 0) {
        HVX_Vector * restrict vec_in  = (HVX_Vector *) src;
        HVX_Vector * restrict vec_out = (HVX_Vector *) dst;

#pragma unroll(4)
        for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
            HVX_Vector in_vec = *vec_in++;
            HVX_Vector temp_v = in_vec;

            HVX_VectorPred pred_cap_right = Q6_Q_vcmp_gt_VsfVsf(in_vec, range_right);
            HVX_VectorPred pred_cap_left  = Q6_Q_vcmp_gt_VsfVsf(range_left, in_vec);

            in_vec = Q6_V_vmux_QVV(pred_cap_right, range_right, temp_v);
            in_vec = Q6_V_vmux_QVV(pred_cap_left, range_left, in_vec);

            *vec_out++ = in_vec;
        }
    } else {
        HVX_UVector * restrict vec_in  = (HVX_UVector *) src;
        HVX_UVector * restrict vec_out = (HVX_UVector *) dst;

#pragma unroll(4)
        for (int i = 0; i < num_elems_whole; i += VLEN_FP32) {
            HVX_Vector in_vec = *vec_in++;
            HVX_Vector temp_v = in_vec;

            HVX_VectorPred pred_cap_right = Q6_Q_vcmp_gt_VsfVsf(in_vec, range_right);
            HVX_VectorPred pred_cap_left  = Q6_Q_vcmp_gt_VsfVsf(range_left, in_vec);

            in_vec = Q6_V_vmux_QVV(pred_cap_right, range_right, temp_v);
            in_vec = Q6_V_vmux_QVV(pred_cap_left, range_left, in_vec);

            *vec_out++ = in_vec;
        }
    }

    if (left_over > 0) {
        const float * srcf = (const float *) src + num_elems_whole;
        float *       dstf = (float *) dst + num_elems_whole;

        HVX_Vector in_vec = *(HVX_UVector *) srcf;

        HVX_Vector temp_v = in_vec;

        HVX_VectorPred pred_cap_right = Q6_Q_vcmp_gt_VsfVsf(in_vec, range_right);
        HVX_VectorPred pred_cap_left  = Q6_Q_vcmp_gt_VsfVsf(range_left, in_vec);

        in_vec = Q6_V_vmux_QVV(pred_cap_right, range_right, temp_v);
        in_vec = Q6_V_vmux_QVV(pred_cap_left, range_left, in_vec);

        hvx_vec_store_u((void *) dstf, left_over * SIZEOF_FP32, in_vec);
    }
}
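
/* The clamp is built from two predicated selects instead of a vmin/vmax pair:
   lanes with x > limit_right are replaced by limit_right, lanes with
   x < limit_left by limit_left, and all other lanes pass through unchanged. */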