whispercpp 1.3.5 → 1.3.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/LICENSE +1 -1
- data/README.md +99 -2
- data/ext/extconf.rb +1 -0
- data/ext/ruby_whisper.c +20 -4
- data/ext/ruby_whisper.h +30 -2
- data/ext/ruby_whisper_context.c +216 -124
- data/ext/ruby_whisper_context_params.c +163 -0
- data/ext/ruby_whisper_model.c +0 -1
- data/ext/ruby_whisper_params.c +0 -1
- data/ext/ruby_whisper_segment.c +0 -1
- data/ext/ruby_whisper_token.c +29 -9
- data/ext/ruby_whisper_transcribe.cpp +4 -1
- data/ext/ruby_whisper_vad_context.c +48 -1
- data/ext/ruby_whisper_vad_context_detect.cpp +6 -5
- data/ext/ruby_whisper_vad_params.c +0 -1
- data/ext/ruby_whisper_vad_segment.c +0 -1
- data/ext/ruby_whisper_vad_segments.c +0 -1
- data/ext/sources/CMakeLists.txt +1 -1
- data/ext/sources/bindings/javascript/package.json +1 -1
- data/ext/sources/cmake/whisper-config.cmake.in +5 -40
- data/ext/sources/examples/bench/bench.cpp +23 -18
- data/ext/sources/examples/cli/cli.cpp +8 -0
- data/ext/sources/examples/common-ggml.cpp +2 -0
- data/ext/sources/examples/miniaudio.h +4507 -2131
- data/ext/sources/examples/server/server.cpp +18 -4
- data/ext/sources/examples/talk-llama/CMakeLists.txt +3 -2
- data/ext/sources/examples/talk-llama/llama-adapter.cpp +7 -13
- data/ext/sources/examples/talk-llama/llama-adapter.h +4 -3
- data/ext/sources/examples/talk-llama/llama-arch.cpp +335 -17
- data/ext/sources/examples/talk-llama/llama-arch.h +42 -0
- data/ext/sources/examples/talk-llama/llama-batch.cpp +3 -1
- data/ext/sources/examples/talk-llama/llama-chat.cpp +21 -1
- data/ext/sources/examples/talk-llama/llama-chat.h +1 -0
- data/ext/sources/examples/talk-llama/llama-context.cpp +508 -520
- data/ext/sources/examples/talk-llama/llama-context.h +27 -28
- data/ext/sources/examples/talk-llama/llama-cparams.h +5 -0
- data/ext/sources/examples/talk-llama/llama-ext.h +12 -0
- data/ext/sources/examples/talk-llama/llama-grammar.cpp +8 -8
- data/ext/sources/examples/talk-llama/llama-graph.cpp +583 -130
- data/ext/sources/examples/talk-llama/llama-graph.h +131 -10
- data/ext/sources/examples/talk-llama/llama-hparams.cpp +57 -40
- data/ext/sources/examples/talk-llama/llama-hparams.h +79 -10
- data/ext/sources/examples/talk-llama/llama-impl.cpp +4 -4
- data/ext/sources/examples/talk-llama/llama-impl.h +13 -1
- data/ext/sources/examples/talk-llama/llama-kv-cache-iswa.cpp +3 -1
- data/ext/sources/examples/talk-llama/llama-kv-cache.cpp +274 -89
- data/ext/sources/examples/talk-llama/llama-kv-cache.h +2 -3
- data/ext/sources/examples/talk-llama/llama-memory-hybrid-iswa.cpp +275 -0
- data/ext/sources/examples/talk-llama/llama-memory-hybrid-iswa.h +140 -0
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.cpp +11 -13
- data/ext/sources/examples/talk-llama/llama-mmap.cpp +28 -11
- data/ext/sources/examples/talk-llama/llama-model-loader.cpp +527 -119
- data/ext/sources/examples/talk-llama/llama-model-loader.h +35 -5
- data/ext/sources/examples/talk-llama/llama-model-saver.cpp +60 -46
- data/ext/sources/examples/talk-llama/llama-model-saver.h +5 -2
- data/ext/sources/examples/talk-llama/llama-model.cpp +1365 -647
- data/ext/sources/examples/talk-llama/llama-model.h +72 -19
- data/ext/sources/examples/talk-llama/llama-quant.cpp +578 -346
- data/ext/sources/examples/talk-llama/{llama-sampling.cpp → llama-sampler.cpp} +190 -76
- data/ext/sources/examples/talk-llama/{llama-sampling.h → llama-sampler.h} +0 -2
- data/ext/sources/examples/talk-llama/llama-vocab.cpp +118 -48
- data/ext/sources/examples/talk-llama/llama-vocab.h +5 -0
- data/ext/sources/examples/talk-llama/llama.cpp +76 -22
- data/ext/sources/examples/talk-llama/llama.h +63 -30
- data/ext/sources/examples/talk-llama/models/afmoe.cpp +2 -3
- data/ext/sources/examples/talk-llama/models/apertus.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/arcee.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/arctic.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/baichuan.cpp +4 -3
- data/ext/sources/examples/talk-llama/models/bailingmoe.cpp +1 -2
- data/ext/sources/examples/talk-llama/models/bailingmoe2.cpp +3 -5
- data/ext/sources/examples/talk-llama/models/bert.cpp +13 -7
- data/ext/sources/examples/talk-llama/models/bitnet.cpp +9 -24
- data/ext/sources/examples/talk-llama/models/bloom.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/chameleon.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/chatglm.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/codeshell.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/cogvlm.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/cohere2-iswa.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/command-r.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/dbrx.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/deci.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/deepseek.cpp +4 -6
- data/ext/sources/examples/talk-llama/models/deepseek2.cpp +24 -21
- data/ext/sources/examples/talk-llama/models/delta-net-base.cpp +445 -0
- data/ext/sources/examples/talk-llama/models/dots1.cpp +4 -6
- data/ext/sources/examples/talk-llama/models/dream.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/ernie4-5-moe.cpp +4 -6
- data/ext/sources/examples/talk-llama/models/ernie4-5.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/eurobert.cpp +97 -0
- data/ext/sources/examples/talk-llama/models/exaone-moe.cpp +145 -0
- data/ext/sources/examples/talk-llama/models/exaone.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/exaone4.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/falcon-h1.cpp +2 -4
- data/ext/sources/examples/talk-llama/models/falcon.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/gemma-embedding.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma2-iswa.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma3.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma3n-iswa.cpp +7 -7
- data/ext/sources/examples/talk-llama/models/glm4-moe.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/glm4.cpp +14 -7
- data/ext/sources/examples/talk-llama/models/gpt2.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/gptneox.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/granite-hybrid.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/granite.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/grok.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/grovemoe.cpp +5 -7
- data/ext/sources/examples/talk-llama/models/hunyuan-dense.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/hunyuan-moe.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/internlm2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/jais.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/jais2.cpp +123 -0
- data/ext/sources/examples/talk-llama/models/jamba.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/kimi-linear.cpp +381 -0
- data/ext/sources/examples/talk-llama/models/lfm2.cpp +145 -124
- data/ext/sources/examples/talk-llama/models/llada-moe.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/llada.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/llama-iswa.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/llama.cpp +18 -11
- data/ext/sources/examples/talk-llama/models/maincoder.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/{graph-context-mamba.cpp → mamba-base.cpp} +9 -3
- data/ext/sources/examples/talk-llama/models/mamba.cpp +1 -2
- data/ext/sources/examples/talk-llama/models/mimo2-iswa.cpp +11 -5
- data/ext/sources/examples/talk-llama/models/minicpm3.cpp +14 -13
- data/ext/sources/examples/talk-llama/models/minimax-m2.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/mistral3.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/models.h +181 -46
- data/ext/sources/examples/talk-llama/models/modern-bert.cpp +2 -9
- data/ext/sources/examples/talk-llama/models/mpt.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/nemotron-h.cpp +26 -14
- data/ext/sources/examples/talk-llama/models/nemotron.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/neo-bert.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/olmo.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/olmo2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/olmoe.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/openai-moe-iswa.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/openelm.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/orion.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/paddleocr.cpp +122 -0
- data/ext/sources/examples/talk-llama/models/pangu-embedded.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/phi2.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/phi3.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/plamo.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/plamo2.cpp +9 -5
- data/ext/sources/examples/talk-llama/models/plamo3.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/plm.cpp +15 -14
- data/ext/sources/examples/talk-llama/models/qwen.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/qwen2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/qwen2moe.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/qwen2vl.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/qwen3.cpp +12 -9
- data/ext/sources/examples/talk-llama/models/qwen35.cpp +381 -0
- data/ext/sources/examples/talk-llama/models/qwen35moe.cpp +422 -0
- data/ext/sources/examples/talk-llama/models/qwen3moe.cpp +15 -8
- data/ext/sources/examples/talk-llama/models/qwen3next.cpp +84 -432
- data/ext/sources/examples/talk-llama/models/qwen3vl-moe.cpp +9 -18
- data/ext/sources/examples/talk-llama/models/qwen3vl.cpp +8 -17
- data/ext/sources/examples/talk-llama/models/refact.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/rnd1.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/rwkv6-base.cpp +2 -0
- data/ext/sources/examples/talk-llama/models/rwkv7-base.cpp +2 -0
- data/ext/sources/examples/talk-llama/models/seed-oss.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/smallthinker.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/smollm3.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/stablelm.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/starcoder.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/starcoder2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/step35-iswa.cpp +165 -0
- data/ext/sources/examples/talk-llama/models/t5-dec.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/t5-enc.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/xverse.cpp +3 -3
- data/ext/sources/examples/talk-llama/unicode.cpp +21 -65
- data/ext/sources/ggml/CMakeLists.txt +9 -3
- data/ext/sources/ggml/include/ggml-backend.h +1 -1
- data/ext/sources/ggml/include/ggml-cann.h +1 -1
- data/ext/sources/ggml/include/ggml-cpu.h +5 -0
- data/ext/sources/ggml/include/ggml-openvino.h +37 -0
- data/ext/sources/ggml/include/ggml-opt.h +1 -1
- data/ext/sources/ggml/include/ggml-rpc.h +6 -1
- data/ext/sources/ggml/include/ggml-virtgpu.h +14 -0
- data/ext/sources/ggml/include/ggml.h +56 -9
- data/ext/sources/ggml/src/CMakeLists.txt +3 -0
- data/ext/sources/ggml/src/ggml-alloc.c +4 -9
- data/ext/sources/ggml/src/ggml-backend-dl.cpp +48 -0
- data/ext/sources/ggml/src/ggml-backend-dl.h +45 -0
- data/ext/sources/ggml/src/ggml-backend-reg.cpp +28 -86
- data/ext/sources/ggml/src/ggml-backend.cpp +5 -2
- data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +1 -1
- data/ext/sources/ggml/src/ggml-blas/ggml-blas.cpp +6 -2
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.cpp +1 -1
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.h +1 -1
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.cpp +348 -189
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +40 -85
- data/ext/sources/ggml/src/ggml-cann/common.h +3 -4
- data/ext/sources/ggml/src/ggml-cann/ggml-cann.cpp +44 -62
- data/ext/sources/ggml/src/ggml-common.h +11 -0
- data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +16 -11
- data/ext/sources/ggml/src/ggml-cpu/amx/amx.cpp +42 -19
- data/ext/sources/ggml/src/ggml-cpu/amx/common.h +34 -10
- data/ext/sources/ggml/src/ggml-cpu/amx/mmq.cpp +85 -85
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/quants.c +85 -1
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/repack.cpp +2744 -548
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/quants.c +1653 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/repack.cpp +1391 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/s390/quants.c +8 -10
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/quants.c +9 -9
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/repack.cpp +118 -18
- data/ext/sources/ggml/src/ggml-cpu/arch-fallback.h +107 -26
- data/ext/sources/ggml/src/ggml-cpu/binary-ops.cpp +2 -6
- data/ext/sources/ggml/src/ggml-cpu/common.h +8 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-impl.h +3 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +59 -12
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.cpp +15 -0
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +21 -20
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +965 -252
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +584 -197
- data/ext/sources/ggml/src/ggml-cpu/ops.cpp +903 -188
- data/ext/sources/ggml/src/ggml-cpu/ops.h +1 -0
- data/ext/sources/ggml/src/ggml-cpu/quants.c +40 -0
- data/ext/sources/ggml/src/ggml-cpu/quants.h +3 -0
- data/ext/sources/ggml/src/ggml-cpu/repack.cpp +2890 -679
- data/ext/sources/ggml/src/ggml-cpu/repack.h +119 -8
- data/ext/sources/ggml/src/ggml-cpu/simd-gemm.h +136 -0
- data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +111 -3
- data/ext/sources/ggml/src/ggml-cpu/unary-ops.cpp +1 -1
- data/ext/sources/ggml/src/ggml-cpu/vec.cpp +17 -0
- data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +1 -1
- data/ext/sources/ggml/src/ggml-cuda/argsort.cu +19 -10
- data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +32 -30
- data/ext/sources/ggml/src/ggml-cuda/common.cuh +134 -18
- data/ext/sources/ggml/src/ggml-cuda/convert.cu +41 -27
- data/ext/sources/ggml/src/ggml-cuda/cpy.cu +6 -3
- data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +78 -64
- data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +384 -143
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cuh +36 -22
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec.cuh +3 -3
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +26 -5
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +1 -1
- data/ext/sources/ggml/src/ggml-cuda/fattn.cu +127 -12
- data/ext/sources/ggml/src/ggml-cuda/gated_delta_net.cu +263 -0
- data/ext/sources/ggml/src/ggml-cuda/gated_delta_net.cuh +4 -0
- data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +595 -200
- data/ext/sources/ggml/src/ggml-cuda/mean.cu +9 -8
- data/ext/sources/ggml/src/ggml-cuda/mma.cuh +173 -6
- data/ext/sources/ggml/src/ggml-cuda/mmf.cu +30 -10
- data/ext/sources/ggml/src/ggml-cuda/mmf.cuh +158 -85
- data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +34 -22
- data/ext/sources/ggml/src/ggml-cuda/mmvf.cu +127 -67
- data/ext/sources/ggml/src/ggml-cuda/mmvf.cuh +2 -0
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +157 -65
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cuh +1 -0
- data/ext/sources/ggml/src/ggml-cuda/norm.cu +18 -76
- data/ext/sources/ggml/src/ggml-cuda/pad.cu +13 -10
- data/ext/sources/ggml/src/ggml-cuda/quantize.cu +1 -1
- data/ext/sources/ggml/src/ggml-cuda/reduce_rows.cuh +2 -16
- data/ext/sources/ggml/src/ggml-cuda/rope.cu +233 -133
- data/ext/sources/ggml/src/ggml-cuda/softmax.cu +8 -83
- data/ext/sources/ggml/src/ggml-cuda/solve_tri.cu +1 -1
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +56 -32
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cuh +1 -1
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_32.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_32.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +3 -3
- data/ext/sources/ggml/src/ggml-cuda/top-k.cu +0 -1
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cu +199 -135
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cuh +20 -14
- data/ext/sources/ggml/src/ggml-cuda/unary.cu +55 -0
- data/ext/sources/ggml/src/ggml-cuda/unary.cuh +2 -0
- data/ext/sources/ggml/src/ggml-cuda/vecdotq.cuh +31 -17
- data/ext/sources/ggml/src/ggml-cuda/vendors/hip.h +10 -0
- data/ext/sources/ggml/src/ggml-hexagon/CMakeLists.txt +82 -45
- data/ext/sources/ggml/src/ggml-hexagon/ggml-hexagon.cpp +334 -160
- data/ext/sources/ggml/src/ggml-hexagon/htp/CMakeLists.txt +7 -5
- data/ext/sources/ggml/src/ggml-hexagon/htp/act-ops.c +328 -197
- data/ext/sources/ggml/src/ggml-hexagon/htp/argsort-ops.c +281 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/binary-ops.c +765 -234
- data/ext/sources/ggml/src/ggml-hexagon/htp/cpy-ops.c +252 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/flash-attn-ops.c +412 -265
- data/ext/sources/ggml/src/ggml-hexagon/htp/get-rows-ops.c +23 -23
- data/ext/sources/ggml/src/ggml-hexagon/htp/{htp-dma.c → hex-dma.c} +1 -1
- data/ext/sources/ggml/src/ggml-hexagon/htp/{htp-dma.h → hex-dma.h} +28 -3
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-dump.h +77 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-fastdiv.h +37 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-utils.h +51 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-ctx.h +1 -1
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-msg.h +27 -37
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-ops.h +6 -35
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-arith.h +443 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-base.h +240 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-copy.h +245 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-div.h +251 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-dump.h +129 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-exp.h +215 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-floor.h +100 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-inverse.h +210 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-reduce.h +296 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-scale.h +133 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sigmoid.h +141 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sqrt.h +126 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-types.h +36 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-utils.h +20 -1347
- data/ext/sources/ggml/src/ggml-hexagon/htp/main.c +211 -13
- data/ext/sources/ggml/src/ggml-hexagon/htp/matmul-ops.c +1119 -952
- data/ext/sources/ggml/src/ggml-hexagon/htp/rope-ops.c +254 -244
- data/ext/sources/ggml/src/ggml-hexagon/htp/set-rows-ops.c +36 -36
- data/ext/sources/ggml/src/ggml-hexagon/htp/softmax-ops.c +155 -138
- data/ext/sources/ggml/src/ggml-hexagon/htp/ssm-conv.c +339 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/sum-rows-ops.c +128 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/unary-ops.c +209 -114
- data/ext/sources/ggml/src/ggml-hexagon/htp/worker-pool.c +1 -5
- data/ext/sources/ggml/src/ggml-hexagon/htp-drv.cpp +418 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp-drv.h +121 -0
- data/ext/sources/ggml/src/ggml-hexagon/libdl.h +79 -0
- data/ext/sources/ggml/src/ggml-hexagon/libggml-htp.inf +38 -0
- data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +6 -0
- data/ext/sources/ggml/src/ggml-impl.h +62 -0
- data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +10 -10
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-common.cpp +13 -2
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.h +8 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.m +147 -17
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.cpp +274 -73
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.h +22 -4
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.m +102 -36
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +174 -23
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.cpp +580 -280
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.h +5 -4
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.cpp +320 -107
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.metal +1068 -825
- data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +19 -1
- data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +3108 -636
- data/ext/sources/ggml/src/ggml-opencl/kernels/concat.cl +41 -99
- data/ext/sources/ggml/src/ggml-opencl/kernels/cpy.cl +45 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cumsum.cl +139 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +204 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/diag.cl +27 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/exp.cl +125 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/expm1.cl +87 -56
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemm_noshuffle_q4_1_f32.cl +132 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_general_q8_0_f32.cl +195 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_q4_1_f32.cl +283 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/l2_norm.cl +71 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mean.cl +114 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q4_0_f32_l4_lm.cl +163 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q4_1_f32_l4_lm.cl +165 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q6_k_f32_l4_lm.cl +158 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q8_0_f32_8x4.cl +129 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_1_f32.cl +219 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_1_f32_flat.cl +229 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_k_f32.cl +180 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/{mul_mv_q6_k.cl → mul_mv_q6_k_f32.cl} +4 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q6_k_f32_flat.cl +194 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/neg.cl +125 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/repeat.cl +31 -32
- data/ext/sources/ggml/src/ggml-opencl/kernels/scale.cl +14 -4
- data/ext/sources/ggml/src/ggml-opencl/kernels/softplus.cl +88 -60
- data/ext/sources/ggml/src/ggml-opencl/kernels/solve_tri.cl +51 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sum_rows.cl +114 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/tanh.cl +94 -48
- data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +26 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/tri.cl +32 -0
- data/ext/sources/ggml/src/ggml-openvino/.clang-format +154 -0
- data/ext/sources/ggml/src/ggml-openvino/CMakeLists.txt +22 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-decoder.cpp +975 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-decoder.h +294 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino-extra.cpp +373 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino-extra.h +182 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino.cpp +1110 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-quants.cpp +884 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-quants.h +153 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/decoder.h +74 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/frontend.cpp +27 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/frontend.h +23 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/input_model.cpp +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/input_model.h +29 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/node_context.h +112 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/cont.cpp +48 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/cpy.cpp +21 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/flash_attn_ext.cpp +90 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/get_rows.cpp +69 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/glu_geglu.cpp +61 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/glu_swiglu.cpp +62 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/mulmat.cpp +90 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/permute.cpp +102 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/reshape.cpp +83 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/rms_norm.cpp +46 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/rope.cpp +123 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/scale.cpp +41 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/set_rows.cpp +76 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/softmax.cpp +89 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/transpose.cpp +23 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/unary_silu.cpp +27 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/view.cpp +53 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op_table.cpp +46 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op_table.h +39 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/eliminate_zp.cpp +123 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/eliminate_zp.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/fuse_to_sdpa.cpp +60 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/fuse_to_sdpa.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/mark_decompression_convert_constant_folding.h +29 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/squeeze_matmul.cpp +58 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/squeeze_matmul.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/translate_session.cpp +293 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/translate_session.h +28 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/utils.cpp +226 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/utils.h +85 -0
- data/ext/sources/ggml/src/ggml-openvino/utils.cpp +823 -0
- data/ext/sources/ggml/src/ggml-openvino/utils.h +123 -0
- data/ext/sources/ggml/src/ggml-quants.c +96 -5
- data/ext/sources/ggml/src/ggml-quants.h +3 -0
- data/ext/sources/ggml/src/ggml-sycl/CMakeLists.txt +15 -88
- data/ext/sources/ggml/src/ggml-sycl/add-id.cpp +5 -1
- data/ext/sources/ggml/src/ggml-sycl/backend.hpp +1 -0
- data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +21 -20
- data/ext/sources/ggml/src/ggml-sycl/common.hpp +315 -10
- data/ext/sources/ggml/src/ggml-sycl/convert.cpp +69 -1
- data/ext/sources/ggml/src/ggml-sycl/convert.hpp +22 -1
- data/ext/sources/ggml/src/ggml-sycl/count-equal.cpp +1 -1
- data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +791 -47
- data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +78 -68
- data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +2 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-common.hpp +1179 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-tile.cpp +55 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-tile.hpp +1338 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-vec.hpp +667 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn.cpp +225 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn.hpp +22 -0
- data/ext/sources/ggml/src/ggml-sycl/gated_delta_net.cpp +309 -0
- data/ext/sources/ggml/src/ggml-sycl/gated_delta_net.hpp +8 -0
- data/ext/sources/ggml/src/ggml-sycl/ggml-sycl.cpp +316 -51
- data/ext/sources/ggml/src/ggml-sycl/norm.cpp +65 -66
- data/ext/sources/ggml/src/ggml-sycl/outprod.cpp +3 -3
- data/ext/sources/ggml/src/ggml-sycl/presets.hpp +3 -0
- data/ext/sources/ggml/src/ggml-sycl/quants.hpp +1 -1
- data/ext/sources/ggml/src/ggml-sycl/rope.cpp +450 -287
- data/ext/sources/ggml/src/ggml-sycl/rope.hpp +6 -0
- data/ext/sources/ggml/src/ggml-sycl/softmax.cpp +6 -6
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq112-dv112.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq128-dv128.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq256-dv256.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq40-dv40.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq576-dv512.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq64-dv64.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq72-dv72.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq80-dv80.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq96-dv96.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +13 -0
- data/ext/sources/ggml/src/ggml-sycl/wkv.cpp +1 -1
- data/ext/sources/ggml/src/ggml-virtgpu/CMakeLists.txt +70 -0
- data/ext/sources/ggml/src/ggml-virtgpu/apir_cs_ggml-rpc-front.cpp +87 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/CMakeLists.txt +21 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/apir_cs_ggml-rpc-back.cpp +115 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-convert.h +13 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-backend.cpp +102 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-buffer-type.cpp +105 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-buffer.cpp +179 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-device.cpp +148 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.cpp +51 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.gen.h +73 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.h +27 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-virgl-apir.h +32 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend.cpp +144 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/api_remoting.h +95 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_backend.gen.h +94 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_backend.h +50 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs.h +378 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs_ggml.h +232 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs_rpc.h +58 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-buffer-type.cpp +81 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-buffer.cpp +119 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-device.cpp +158 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-reg.cpp +213 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend.cpp +69 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-remoting.h +71 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggmlremoting_functions.yaml +166 -0
- data/ext/sources/ggml/src/ggml-virtgpu/include/apir_hw.h +9 -0
- data/ext/sources/ggml/src/ggml-virtgpu/regenerate_remoting.py +333 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-apir.h +15 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-backend.cpp +58 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-buffer-type.cpp +110 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-buffer.cpp +173 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-device.cpp +192 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-impl.h +36 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward.gen.h +53 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-shm.cpp +98 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-shm.h +23 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-utils.cpp +179 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-utils.h +86 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu.cpp +544 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu.h +117 -0
- data/ext/sources/ggml/src/ggml-vulkan/CMakeLists.txt +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/ggml-vulkan.cpp +1250 -465
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +16 -8
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/elu.comp +27 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +374 -170
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.glsl +66 -22
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +389 -201
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +106 -58
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_mask_opt.comp +162 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +9 -8
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gated_delta_net.comp +128 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +12 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.glsl +20 -17
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +11 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +8 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_id_funcs.glsl +3 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +5 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.glsl +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +2 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_funcs.glsl +36 -63
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_params.glsl +10 -5
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_vision.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sgn.comp +21 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/ssm_conv.comp +16 -10
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +55 -35
- data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu-shader-lib.hpp +1314 -109
- data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu.cpp +1660 -1371
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argmax.wgsl +72 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argsort.wgsl +106 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argsort_merge.wgsl +134 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/binary.wgsl +141 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/common_decls.tmpl +65 -72
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/concat.wgsl +75 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cpy.tmpl.wgsl +6 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cumsum.wgsl +66 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/embed_wgsl.py +40 -5
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/flash_attn.wgsl +105 -60
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{get_rows.tmpl.wgsl → get_rows.wgsl} +53 -259
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{mul_mat.tmpl.wgsl → mul_mat.wgsl} +68 -257
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_decls.tmpl +692 -23
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{mul_mat_reg_tile.tmpl.wgsl → mul_mat_reg_tile.wgsl} +28 -128
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{mul_mat_subgroup_matrix.tmpl.wgsl → mul_mat_subgroup_matrix.wgsl} +31 -137
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_vec.wgsl +480 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/pad.wgsl +86 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/repeat.wgsl +67 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{scale.tmpl.wgsl → scale.wgsl} +9 -36
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/set_rows.wgsl +40 -12
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/sum_rows.wgsl +55 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/unary.wgsl +193 -0
- data/ext/sources/ggml/src/ggml-zdnn/ggml-zdnn.cpp +6 -1
- data/ext/sources/ggml/src/ggml-zendnn/CMakeLists.txt +31 -32
- data/ext/sources/ggml/src/ggml-zendnn/ggml-zendnn.cpp +9 -6
- data/ext/sources/ggml/src/ggml.c +167 -33
- data/ext/sources/ggml/src/gguf.cpp +229 -44
- data/ext/sources/src/whisper.cpp +6 -28
- data/sig/whisper.rbs +43 -2
- data/test/test_context_params.rb +82 -0
- data/test/test_token.rb +11 -0
- data/test/test_vad_context.rb +58 -8
- data/test/test_whisper.rb +20 -0
- data/whispercpp.gemspec +1 -1
- metadata +240 -28
- data/ext/sources/ggml/cmake/BuildTypes.cmake +0 -54
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm-ppc.h +0 -333
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-exp.c +0 -94
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-inverse.c +0 -72
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sigmoid.c +0 -49
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-utils.c +0 -1020
- data/ext/sources/ggml/src/ggml-hexagon/htp/ops-utils.h +0 -149
- data/ext/sources/ggml/src/ggml-hexagon/htp-utils.c +0 -454
- data/ext/sources/ggml/src/ggml-hexagon/htp-utils.h +0 -221
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/bin_op.tmpl.wgsl +0 -188
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/binary_head.tmpl +0 -45
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_vec.tmpl.wgsl +0 -267
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/set_rows.tmpl.wgsl +0 -112
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/unary_op.wgsl +0 -483
|
@@ -0,0 +1,294 @@
|
|
|
1
|
+
#pragma once
|
|
2
|
+
|
|
3
|
+
#include "ggml-quants.h"
|
|
4
|
+
#include "ggml.h"
|
|
5
|
+
#include "openvino/decoder.h"
|
|
6
|
+
|
|
7
|
+
#include <cstdint>
|
|
8
|
+
#include <cstring>
|
|
9
|
+
#include <map>
|
|
10
|
+
#include <memory>
|
|
11
|
+
#include <openvino/core/partial_shape.hpp>
|
|
12
|
+
#include <optional>
|
|
13
|
+
#include <vector>
|
|
14
|
+
|
|
15
|
+
struct ModelParams {
|
|
16
|
+
int ctx = -1;
|
|
17
|
+
int ctx_swa = -1;
|
|
18
|
+
int ctx_per_seq = -1;
|
|
19
|
+
int ctx_per_seq_swa = -1;
|
|
20
|
+
int n_seq = 1;
|
|
21
|
+
int n_heads = -1;
|
|
22
|
+
int n_heads_kv = -1;
|
|
23
|
+
int head_size = -1;
|
|
24
|
+
int32_t rope_params[15];
|
|
25
|
+
std::vector<int> swa_layers;
|
|
26
|
+
|
|
27
|
+
std::vector<std::string> kv_names;
|
|
28
|
+
size_t kv_buffer_ctx_id = 0;
|
|
29
|
+
|
|
30
|
+
bool same_rope_params(const ModelParams & other) const {
|
|
31
|
+
return memcmp(rope_params, other.rope_params, sizeof(int32_t) * 15) == 0;
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
bool can_reuse_dynamically(const ModelParams & other) const { return same_rope_params(other); }
|
|
35
|
+
|
|
36
|
+
bool can_reuse_statically(const ModelParams & other) const { return same_rope_params(other) && ctx == other.ctx; }
|
|
37
|
+
|
|
38
|
+
bool kv_buffer_changed(const ModelParams & other) const { return kv_buffer_ctx_id != other.kv_buffer_ctx_id; }
|
|
39
|
+
};
|
|
40
|
+
|
|
41
|
+
struct ComputeParams {
|
|
42
|
+
int n_seq_active = 1;
|
|
43
|
+
int seq_active_start = 0;
|
|
44
|
+
int attention_size = -1;
|
|
45
|
+
int attention_size_swa = -1;
|
|
46
|
+
int input_len = -1;
|
|
47
|
+
int token_len_per_seq = -1;
|
|
48
|
+
int past_kv_len = -1;
|
|
49
|
+
int output_len = 1;
|
|
50
|
+
};
|
|
51
|
+
|
|
52
|
+
class GgmlOvDecoder : public ov::frontend::ggml::GgmlDecoder {
|
|
53
|
+
public:
|
|
54
|
+
struct NodeInfo {
|
|
55
|
+
ggml_tensor * node;
|
|
56
|
+
std::string node_name;
|
|
57
|
+
std::string node_op_type;
|
|
58
|
+
std::map<std::string, ggml_tensor *> node_inputs;
|
|
59
|
+
std::vector<std::string> node_inputs_names;
|
|
60
|
+
ggml_tensor * node_output;
|
|
61
|
+
std::string node_output_name;
|
|
62
|
+
int node_op_case = 0;
|
|
63
|
+
void * data_addr;
|
|
64
|
+
};
|
|
65
|
+
// Graph decoder
|
|
66
|
+
GgmlOvDecoder(ggml_cgraph * cgraph,
|
|
67
|
+
ModelParams & model_params,
|
|
68
|
+
ComputeParams & compute_params,
|
|
69
|
+
std::map<std::string, std::shared_ptr<ov::Node>> & model_weights,
|
|
70
|
+
bool is_static,
|
|
71
|
+
bool is_stateful = false,
|
|
72
|
+
bool is_prefill = false,
|
|
73
|
+
int prefill_chunk_size = 256);
|
|
74
|
+
|
|
75
|
+
// Naive graph decoder
|
|
76
|
+
GgmlOvDecoder(ggml_cgraph * cgraph, std::map<std::string, std::shared_ptr<ov::Node>> & model_weights);
|
|
77
|
+
|
|
78
|
+
virtual ov::Any get_attribute(const std::string & name) const override {
|
|
79
|
+
return nullptr;
|
|
80
|
+
GGML_UNUSED(name);
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
virtual ov::PartialShape get_input_shape(int node_idx, const std::string & name) const override;
|
|
84
|
+
|
|
85
|
+
virtual std::vector<size_t> get_input_stride(int node_idx, const std::string & name) const override;
|
|
86
|
+
|
|
87
|
+
virtual ov::element::Type get_input_type(int node_idx, const std::string & name) const override;
|
|
88
|
+
|
|
89
|
+
virtual size_t get_input_size() const override;
|
|
90
|
+
|
|
91
|
+
virtual size_t get_input_size(int node_idx) const override;
|
|
92
|
+
|
|
93
|
+
virtual void get_input_node(size_t input_port_idx,
|
|
94
|
+
std::string & producer_name,
|
|
95
|
+
std::string & producer_output_port_name,
|
|
96
|
+
size_t & producer_output_port_index) const override {
|
|
97
|
+
GGML_UNUSED(input_port_idx);
|
|
98
|
+
GGML_UNUSED(producer_name);
|
|
99
|
+
GGML_UNUSED(producer_output_port_name);
|
|
100
|
+
GGML_UNUSED(producer_output_port_index);
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
virtual std::vector<std::string> get_input_names(int node_idx) const override;
|
|
104
|
+
|
|
105
|
+
virtual ov::PartialShape get_output_shape(int node_idx) const override;
|
|
106
|
+
|
|
107
|
+
virtual ov::element::Type get_output_type(int node_idx) const override;
|
|
108
|
+
|
|
109
|
+
virtual int32_t * get_input_op_params(int node_idx, const std::string & name) const override;
|
|
110
|
+
|
|
111
|
+
virtual int32_t * get_output_op_params(int node_idx) const override;
|
|
112
|
+
|
|
113
|
+
virtual std::vector<std::string> get_output_names(int node_idx) const override;
|
|
114
|
+
|
|
115
|
+
virtual const std::string & get_op_type() const override;
|
|
116
|
+
|
|
117
|
+
virtual const std::string & get_op_type(int node_idx) const override;
|
|
118
|
+
|
|
119
|
+
virtual const std::string & get_op_name() const override;
|
|
120
|
+
|
|
121
|
+
virtual const std::string & get_op_name(int node_idx) const override;
|
|
122
|
+
|
|
123
|
+
virtual void visit_subgraph(std::function<void(std::shared_ptr<GgmlDecoder>, int node_idx)> node_visitor) const override;
|
|
124
|
+
|
|
125
|
+
ggml_tensor * get_input_ggml_tensor(const std::string & name) const { return m_inputs.at(name); }
|
|
126
|
+
|
|
127
|
+
virtual int get_op_case(int node_idx) const override { return m_node_info_list[node_idx].node_op_case; }
|
|
128
|
+
|
|
129
|
+
virtual const std::map<std::string, std::shared_ptr<ov::Node>> & get_model_inputs() const override {
|
|
130
|
+
return m_model_inputs;
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
virtual const std::map<std::string, std::shared_ptr<ov::Node>> & get_model_extra_inputs() const override {
|
|
134
|
+
return m_model_extra_inputs;
|
|
135
|
+
}
|
|
136
|
+
|
|
137
|
+
virtual const std::map<std::string, std::shared_ptr<ov::Tensor>> & get_model_extra_input_values() const {
|
|
138
|
+
return m_model_extra_input_values;
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
virtual const std::map<std::string, std::shared_ptr<ov::Node>> & get_model_weights() const override {
|
|
142
|
+
return m_model_weights;
|
|
143
|
+
}
|
|
144
|
+
|
|
145
|
+
virtual std::vector<std::string> get_model_output_names() const override {
|
|
146
|
+
return m_model_output_names;
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
const std::map<std::string, ggml_tensor *> & get_model_outputs() const { return m_model_outputs; }
|
|
150
|
+
|
|
151
|
+
virtual int get_ctx_size() const { return m_model_params.ctx; }
|
|
152
|
+
|
|
153
|
+
virtual int get_ctx_swa_size() const { return m_model_params.ctx_swa; }
|
|
154
|
+
|
|
155
|
+
virtual int get_ctx_per_seq() const { return m_model_params.ctx_per_seq; }
|
|
156
|
+
|
|
157
|
+
virtual int get_ctx_per_seq_swa() const { return m_model_params.ctx_per_seq_swa; }
|
|
158
|
+
|
|
159
|
+
virtual int get_n_seq() const { return m_model_params.n_seq; }
|
|
160
|
+
|
|
161
|
+
virtual int is_swa_layer(int layer) const override {
|
|
162
|
+
return std::find(m_model_params.swa_layers.begin(), m_model_params.swa_layers.end(), layer) !=
|
|
163
|
+
m_model_params.swa_layers.end();
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
int get_past_kv_len() const { return m_compute_params.past_kv_len; }
|
|
167
|
+
|
|
168
|
+
int get_input_len() const { return m_compute_params.input_len; }
|
|
169
|
+
|
|
170
|
+
virtual int32_t * get_rope_params() const override { return const_cast<int32_t *>(m_model_params.rope_params); }
|
|
171
|
+
|
|
172
|
+
virtual std::map<std::string, std::string> get_kv_param_res_names() const override;
|
|
173
|
+
|
|
174
|
+
virtual bool is_static() const override { return m_is_static; }
|
|
175
|
+
|
|
176
|
+
virtual bool is_stateful() const override { return m_is_stateful; }
|
|
177
|
+
|
|
178
|
+
ov::PartialShape get_graph_input_shape(const ggml_tensor * op, const ggml_tensor * input) const;
|
|
179
|
+
|
|
180
|
+
static void dump_cgraph(const ggml_cgraph * cgraph, std::string & filename);
|
|
181
|
+
|
|
182
|
+
static std::shared_ptr<ov::Node> create_weight_node(ggml_tensor * tensor, bool naive = false);
|
|
183
|
+
|
|
184
|
+
static std::map<std::string, std::shared_ptr<ov::Node>> create_weight_nodes(ggml_cgraph * cgraph,
|
|
185
|
+
bool naive = false);
|
|
186
|
+
|
|
187
|
+
const ggml_tensor * get_tensor_used_op(const ggml_tensor * tensor) const;
|
|
188
|
+
|
|
189
|
+
const ggml_tensor * get_tensor_from_name(const std::string & name) const;
|
|
190
|
+
|
|
191
|
+
void clear_model_weights() { m_model_weights.clear(); }
|
|
192
|
+
|
|
193
|
+
static std::pair<ModelParams, ComputeParams> compute_llm_params(ggml_cgraph * cgraph, bool is_static);
|
|
194
|
+
|
|
195
|
+
ModelParams get_model_params() const { return m_model_params; }
|
|
196
|
+
|
|
197
|
+
ComputeParams get_compute_params() const { return m_compute_params; }
|
|
198
|
+
|
|
199
|
+
void set_model_params(const ModelParams & model_params) { m_model_params = model_params; }
|
|
200
|
+
|
|
201
|
+
void set_compute_params(const ComputeParams & compute_params) { m_compute_params = compute_params; }
|
|
202
|
+
|
|
203
|
+
bool m_is_static = false;
|
|
204
|
+
bool m_is_stateful = false;
|
|
205
|
+
bool m_is_prefill = false;
|
|
206
|
+
bool m_naive = false;
|
|
207
|
+
int m_prefill_chunk_size = 0;
|
|
208
|
+
|
|
209
|
+
static ov::Shape get_shape(const ggml_tensor * tensor);
|
|
210
|
+
static std::vector<size_t> get_stride(const ggml_tensor * tensor);
|
|
211
|
+
static ov::element::Type get_ov_type(const ggml_tensor * tensor);
|
|
212
|
+
static std::string compute_op_type(const ggml_tensor * node);
|
|
213
|
+
void add_extra_inputs();
|
|
214
|
+
|
|
215
|
+
void update_io(ggml_cgraph * cgraph);
|
|
216
|
+
|
|
217
|
+
inline static bool is_inp_tok(const ggml_tensor * tensor, const ggml_tensor * op) {
|
|
218
|
+
return op->op == GGML_OP_GET_ROWS && tensor == op->src[1] && op->src[0]->op == GGML_OP_NONE;
|
|
219
|
+
}
|
|
220
|
+
|
|
221
|
+
inline static bool is_inp_pos(const ggml_tensor * tensor, const ggml_tensor * op) {
|
|
222
|
+
return op->op == GGML_OP_ROPE && tensor == op->src[1];
|
|
223
|
+
}
|
|
224
|
+
|
|
225
|
+
inline static bool is_inp_emb(const ggml_tensor * tensor, const ggml_tensor * op) {
|
|
226
|
+
return tensor->op == GGML_OP_GET_ROWS && op->op == GGML_OP_RMS_NORM;
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
inline static bool is_inp_mask(const ggml_tensor * tensor, const ggml_tensor * op) {
|
|
230
|
+
return op->op == GGML_OP_CPY || (op->op == GGML_OP_FLASH_ATTN_EXT && tensor == op->src[3]);
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
inline static bool is_rope_freqs_weight(const ggml_tensor * tensor, const ggml_tensor * op) {
|
|
234
|
+
return op->op == GGML_OP_ROPE && tensor == op->src[2];
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
inline static bool is_kvcache(const ggml_tensor * tensor, const ggml_tensor * op) {
|
|
238
|
+
return op->op == GGML_OP_SET_ROWS && op->src[2] == tensor;
|
|
239
|
+
}
|
|
240
|
+
|
|
241
|
+
inline static bool is_kv_idx(const ggml_tensor * tensor, const ggml_tensor * op) {
|
|
242
|
+
return op->op == GGML_OP_SET_ROWS && op->src[1] == tensor;
|
|
243
|
+
}
|
|
244
|
+
|
|
245
|
+
inline static bool is_output_idx(const ggml_tensor * tensor, const ggml_tensor * op) {
|
|
246
|
+
return op->op == GGML_OP_GET_ROWS && tensor == op->src[1] && op->src[0]->op != GGML_OP_NONE;
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
static std::string get_graph_input_ov_name(const ggml_tensor * tensor, const ggml_tensor * op) {
|
|
250
|
+
if (is_inp_tok(tensor, op)) {
|
|
251
|
+
return "inp_tokens";
|
|
252
|
+
}
|
|
253
|
+
if (is_inp_pos(tensor, op)) {
|
|
254
|
+
return "inp_pos";
|
|
255
|
+
}
|
|
256
|
+
if (is_inp_emb(tensor, op)) {
|
|
257
|
+
return "embd";
|
|
258
|
+
}
|
|
259
|
+
if (is_output_idx(tensor, op)) {
|
|
260
|
+
return "inp_out_ids";
|
|
261
|
+
}
|
|
262
|
+
if (is_inp_mask(tensor, op)) {
|
|
263
|
+
return std::string(tensor->name).find("swa") == std::string::npos ? "self_kq_mask" : "self_kq_mask_swa";
|
|
264
|
+
}
|
|
265
|
+
return tensor->name;
|
|
266
|
+
}
|
|
267
|
+
|
|
268
|
+
private:
|
|
269
|
+
void set_input_output();
|
|
270
|
+
int compute_op_case(const ggml_tensor * node) const;
|
|
271
|
+
bool node_is_used_as_src(const int node_idx);
|
|
272
|
+
void compute_model_inputs();
|
|
273
|
+
void compute_model_outputs();
|
|
274
|
+
|
|
275
|
+
void validate_cgraph() const;
|
|
276
|
+
|
|
277
|
+
ggml_cgraph * m_cgraph = nullptr;
|
|
278
|
+
std::map<std::string, ggml_tensor *> m_inputs;
|
|
279
|
+
|
|
280
|
+
std::map<std::string, std::shared_ptr<ov::Node>> m_model_inputs;
|
|
281
|
+
std::map<std::string, std::shared_ptr<ov::Node>> m_model_extra_inputs;
|
|
282
|
+
std::map<std::string, std::shared_ptr<ov::Tensor>> m_model_extra_input_values;
|
|
283
|
+
std::map<std::string, std::shared_ptr<ov::Node>> m_model_weights;
|
|
284
|
+
std::map<std::string, ggml_tensor *> m_model_outputs;
|
|
285
|
+
std::vector<std::string> m_model_output_names;
|
|
286
|
+
std::vector<NodeInfo> m_node_info_list;
|
|
287
|
+
|
|
288
|
+
ModelParams m_model_params;
|
|
289
|
+
ComputeParams m_compute_params;
|
|
290
|
+
};
|
|
291
|
+
|
|
292
|
+
void print_tensor_address_map(const ggml_cgraph * cgraph);
|
|
293
|
+
|
|
294
|
+
int extract_layer_from_name(const std::string & name);
|
|
@@ -0,0 +1,373 @@
|
|
|
1
|
+
#include "ggml-openvino-extra.h"
|
|
2
|
+
|
|
3
|
+
#include "ggml-impl.h"
|
|
4
|
+
#include "ggml.h"
|
|
5
|
+
|
|
6
|
+
#include <cstring>
|
|
7
|
+
#include <openvino/runtime/intel_gpu/ocl/ocl.hpp>
|
|
8
|
+
#include <openvino/runtime/intel_npu/level_zero/level_zero.hpp>
|
|
9
|
+
#include <optional>
|
|
10
|
+
|
|
11
|
+
ov::Core & ov_singleton_core() {
|
|
12
|
+
static ov::Core core;
|
|
13
|
+
return core;
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
// =====================================================
|
|
17
|
+
// Device Configuration Implementations
|
|
18
|
+
// =====================================================
|
|
19
|
+
|
|
20
|
+
void ggml_openvino_device_config::init() {
|
|
21
|
+
if (initialized) {
|
|
22
|
+
return;
|
|
23
|
+
}
|
|
24
|
+
device_name = getenv("GGML_OPENVINO_DEVICE") ? getenv("GGML_OPENVINO_DEVICE") : "CPU";
|
|
25
|
+
auto available_devices = ov_singleton_core().get_available_devices();
|
|
26
|
+
if (std::find(available_devices.begin(), available_devices.end(), device_name) == available_devices.end()) {
|
|
27
|
+
GGML_LOG_WARN("GGML OpenVINO Backend: device %s is not available, fallback to CPU\n", device_name.c_str());
|
|
28
|
+
device_name = "CPU";
|
|
29
|
+
}
|
|
30
|
+
is_npu = (device_name == "NPU");
|
|
31
|
+
|
|
32
|
+
auto * cache_dir = getenv("GGML_OPENVINO_CACHE_DIR");
|
|
33
|
+
if (device_name == "NPU") {
|
|
34
|
+
compile_config = {
|
|
35
|
+
{"NPU_COMPILER_DYNAMIC_QUANTIZATION", "YES" },
|
|
36
|
+
{"NPU_USE_NPUW", "YES" },
|
|
37
|
+
{"NPUW_DEVICES", "NPU" },
|
|
38
|
+
{"NPUW_FOLD", "YES" },
|
|
39
|
+
{"NPUW_WEIGHTS_BANK", "shared"},
|
|
40
|
+
{"NPUW_FUNCALL_FOR_ALL", "YES" },
|
|
41
|
+
{"NPUW_FUNCALL_ASYNC", "YES" },
|
|
42
|
+
{"NPUW_DQ", "YES" },
|
|
43
|
+
{"NPUW_DQ_FULL", "NO" },
|
|
44
|
+
};
|
|
45
|
+
if (cache_dir) {
|
|
46
|
+
compile_config["NPUW_CACHE_DIR"] = cache_dir;
|
|
47
|
+
}
|
|
48
|
+
} else if (cache_dir) {
|
|
49
|
+
ov_singleton_core().set_property(ov::cache_dir(cache_dir));
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
// Initialize remote context with queue sharing for GPU
|
|
53
|
+
if (device_name == "GPU") {
|
|
54
|
+
// Create OpenCL context and queue
|
|
55
|
+
cl_int err;
|
|
56
|
+
cl_platform_id platform;
|
|
57
|
+
err = clGetPlatformIDs(1, &platform, nullptr);
|
|
58
|
+
if (err != CL_SUCCESS) {
|
|
59
|
+
GGML_LOG_ERROR("Failed to get OpenCL platform: %d\n", err);
|
|
60
|
+
return;
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
cl_device_id cl_device;
|
|
64
|
+
err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &cl_device, nullptr);
|
|
65
|
+
if (err != CL_SUCCESS) {
|
|
66
|
+
GGML_LOG_ERROR("Failed to get OpenCL device: %d\n", err);
|
|
67
|
+
return;
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
cl_context cl_ctx = clCreateContext(nullptr, 1, &cl_device, nullptr, nullptr, &err);
|
|
71
|
+
if (err != CL_SUCCESS) {
|
|
72
|
+
GGML_LOG_ERROR("Failed to create OpenCL context: %d\n", err);
|
|
73
|
+
return;
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
cl_queue = clCreateCommandQueueWithProperties(cl_ctx, cl_device, nullptr, &err);
|
|
77
|
+
if (err != CL_SUCCESS) {
|
|
78
|
+
GGML_LOG_ERROR("Failed to create OpenCL command queue: %d\n", err);
|
|
79
|
+
clReleaseContext(cl_ctx);
|
|
80
|
+
return;
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
// Create OpenVINO remote context with queue sharing
|
|
84
|
+
remote_context = ov::intel_gpu::ocl::ClContext(ov_singleton_core(), cl_queue);
|
|
85
|
+
|
|
86
|
+
// Release the context (queue keeps a reference)
|
|
87
|
+
clReleaseContext(cl_ctx);
|
|
88
|
+
} else if (device_name == "NPU") {
|
|
89
|
+
// remote tensor is not used for NPU yet
|
|
90
|
+
// remote_context = ov_singleton_core().get_default_context(device_name);
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
initialized = true;
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
ggml_openvino_device_config::~ggml_openvino_device_config() {
|
|
97
|
+
if (cl_queue != nullptr) {
|
|
98
|
+
clReleaseCommandQueue(cl_queue);
|
|
99
|
+
cl_queue = nullptr;
|
|
100
|
+
}
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
// Get the global device config singleton
|
|
104
|
+
ggml_openvino_device_config & ggml_openvino_get_device_config() {
|
|
105
|
+
static ggml_openvino_device_config config;
|
|
106
|
+
return config;
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
// Initialize device config (call during backend init)
|
|
110
|
+
void ggml_openvino_init_device_config() {
|
|
111
|
+
ggml_openvino_get_device_config().init();
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
// Get the device name
|
|
115
|
+
const std::string & ggml_openvino_get_device_name() {
|
|
116
|
+
return ggml_openvino_get_device_config().device_name;
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
// Check if running on NPU
|
|
120
|
+
bool ggml_openvino_is_npu() {
|
|
121
|
+
return ggml_openvino_get_device_config().is_npu;
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
// Get the remote context for the current device (returns empty optional for CPU)
|
|
125
|
+
std::optional<ov::RemoteContext> ggml_openvino_get_remote_context() {
|
|
126
|
+
return ggml_openvino_get_device_config().remote_context;
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
// Get the compile config for the current device
|
|
130
|
+
const ov::AnyMap & ggml_openvino_get_compile_config() {
|
|
131
|
+
return ggml_openvino_get_device_config().compile_config;
|
|
132
|
+
}
|
|
133
|
+
|
|
134
|
+
// Get the OpenCL command queue for GPU operations
|
|
135
|
+
cl_command_queue ggml_openvino_get_cl_queue() {
|
|
136
|
+
return ggml_openvino_get_device_config().cl_queue;
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
// Get the clEnqueueMemFillINTEL function pointer (lazy load)
|
|
140
|
+
clEnqueueMemFillINTEL_fn ggml_openvino_get_clEnqueueMemFillINTEL() {
|
|
141
|
+
static clEnqueueMemFillINTEL_fn fn = nullptr;
|
|
142
|
+
static bool loaded = false;
|
|
143
|
+
if (!loaded) {
|
|
144
|
+
loaded = true;
|
|
145
|
+
cl_platform_id platform;
|
|
146
|
+
if (clGetPlatformIDs(1, &platform, nullptr) == CL_SUCCESS) {
|
|
147
|
+
fn = (clEnqueueMemFillINTEL_fn) clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueMemFillINTEL");
|
|
148
|
+
}
|
|
149
|
+
}
|
|
150
|
+
return fn;
|
|
151
|
+
}
|
|
152
|
+
|
|
153
|
+
// Get the clEnqueueMemcpyINTEL function pointer (lazy load)
|
|
154
|
+
clEnqueueMemcpyINTEL_fn ggml_openvino_get_clEnqueueMemcpyINTEL() {
|
|
155
|
+
static clEnqueueMemcpyINTEL_fn fn = nullptr;
|
|
156
|
+
static bool loaded = false;
|
|
157
|
+
if (!loaded) {
|
|
158
|
+
loaded = true;
|
|
159
|
+
cl_platform_id platform;
|
|
160
|
+
if (clGetPlatformIDs(1, &platform, nullptr) == CL_SUCCESS) {
|
|
161
|
+
fn = (clEnqueueMemcpyINTEL_fn) clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueMemcpyINTEL");
|
|
162
|
+
}
|
|
163
|
+
}
|
|
164
|
+
return fn;
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
// Get requantization type for a tensor type (returns nullopt if no requant needed)
|
|
168
|
+
std::optional<ExtraQuantType> ggml_openvino_get_requant_type(const ggml_tensor * tensor, bool no_requant) {
|
|
169
|
+
if (no_requant) {
|
|
170
|
+
return std::nullopt;
|
|
171
|
+
}
|
|
172
|
+
if (strncmp(tensor->name, "token_embd.weight", 17) == 0) {
|
|
173
|
+
return ((ggml_openvino_is_npu() && tensor->type == GGML_TYPE_Q6_K) ? ExtraQuantType::F16 : ExtraQuantType::Q8_0_C);
|
|
174
|
+
}
|
|
175
|
+
if (strncmp(tensor->name, "output.weight", 13) == 0) {
|
|
176
|
+
return ExtraQuantType::Q8_0_C;
|
|
177
|
+
}
|
|
178
|
+
if (ggml_openvino_is_npu()) {
|
|
179
|
+
return ExtraQuantType::Q4_0_128;
|
|
180
|
+
}
|
|
181
|
+
switch (tensor->type) {
|
|
182
|
+
case GGML_TYPE_Q6_K:
|
|
183
|
+
case GGML_TYPE_Q5_K:
|
|
184
|
+
return ExtraQuantType::Q8_0_C;
|
|
185
|
+
default:
|
|
186
|
+
return std::nullopt;
|
|
187
|
+
}
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
// =====================================================
|
|
191
|
+
// Extracted Layout Calculation
|
|
192
|
+
// =====================================================
|
|
193
|
+
|
|
194
|
+
ggml_openvino_extracted_layout ggml_openvino_get_extracted_layout(const ggml_tensor * tensor, bool use_bias) {
|
|
195
|
+
ggml_openvino_extracted_layout layout = {};
|
|
196
|
+
layout.is_symmetric = false;
|
|
197
|
+
|
|
198
|
+
if (!ggml_is_quantized(tensor->type)) {
|
|
199
|
+
return layout;
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
// Only handle 2D weight tensors
|
|
203
|
+
if (tensor->ne[2] != 1 || tensor->ne[3] != 1) {
|
|
204
|
+
return layout;
|
|
205
|
+
}
|
|
206
|
+
|
|
207
|
+
int64_t n_elements = ggml_nelements(tensor);
|
|
208
|
+
const size_t alignment = 64; // Good for SIMD
|
|
209
|
+
|
|
210
|
+
// Check if requantization is needed (NPU-specific)
|
|
211
|
+
auto requant_type = ggml_openvino_get_requant_type(tensor, use_bias);
|
|
212
|
+
if (requant_type.has_value()) {
|
|
213
|
+
layout.is_requant = true;
|
|
214
|
+
layout.requant_type = requant_type;
|
|
215
|
+
|
|
216
|
+
// Special case: requant to F16 - just store F16 weights, no scales/zp
|
|
217
|
+
if (requant_type.value() == ExtraQuantType::F16) {
|
|
218
|
+
layout.weights_size = n_elements * sizeof(uint16_t); // F16 = 2 bytes
|
|
219
|
+
layout.total_size = layout.weights_size;
|
|
220
|
+
layout.weights_offset = 0;
|
|
221
|
+
// No scales/zp for F16
|
|
222
|
+
return layout;
|
|
223
|
+
}
|
|
224
|
+
|
|
225
|
+
// Requant to different quantized format (e.g., Q4_0_128)
|
|
226
|
+
switch (requant_type.value()) {
|
|
227
|
+
case ExtraQuantType::Q4_0_128:
|
|
228
|
+
layout.is_u4 = true;
|
|
229
|
+
layout.weights_per_block = 128;
|
|
230
|
+
layout.is_symmetric = true;
|
|
231
|
+
break;
|
|
232
|
+
case ExtraQuantType::Q4_0_C:
|
|
233
|
+
layout.is_u4 = true;
|
|
234
|
+
layout.weights_per_block = tensor->ne[0];
|
|
235
|
+
layout.is_symmetric = true;
|
|
236
|
+
break;
|
|
237
|
+
case ExtraQuantType::Q8_0_32:
|
|
238
|
+
layout.is_u4 = false;
|
|
239
|
+
layout.weights_per_block = 32;
|
|
240
|
+
layout.is_symmetric = true;
|
|
241
|
+
break;
|
|
242
|
+
case ExtraQuantType::Q8_0_C:
|
|
243
|
+
layout.is_u4 = false;
|
|
244
|
+
layout.weights_per_block = tensor->ne[0];
|
|
245
|
+
layout.is_symmetric = true;
|
|
246
|
+
break;
|
|
247
|
+
case ExtraQuantType::Q8_1_C:
|
|
248
|
+
layout.is_u4 = false;
|
|
249
|
+
layout.weights_per_block = tensor->ne[0];
|
|
250
|
+
break;
|
|
251
|
+
default:
|
|
252
|
+
layout.weights_per_block = -1;
|
|
253
|
+
GGML_ABORT("Code of re-quantizing to channel-wise is not updated");
|
|
254
|
+
break;
|
|
255
|
+
}
|
|
256
|
+
|
|
257
|
+
if (layout.is_requant) {
|
|
258
|
+
// Calculate sizes for requantized format
|
|
259
|
+
layout.weights_size = layout.is_u4 ? (n_elements / 2) : n_elements;
|
|
260
|
+
int64_t n_blocks = n_elements / layout.weights_per_block;
|
|
261
|
+
layout.scales_size = n_blocks * sizeof(uint16_t);
|
|
262
|
+
// For symmetric quantization, we only need one zp value (not one per block)
|
|
263
|
+
// Zero points are stored in U4 or U8 format matching the weight type
|
|
264
|
+
size_t n_zp_elements = layout.is_symmetric ? 1 : n_blocks;
|
|
265
|
+
layout.zp_size = layout.is_u4 ? ((n_zp_elements + 1) / 2) : n_zp_elements;
|
|
266
|
+
|
|
267
|
+
layout.weights_offset = 0;
|
|
268
|
+
layout.scales_offset = ((layout.weights_size + alignment - 1) / alignment) * alignment;
|
|
269
|
+
layout.zp_offset = layout.scales_offset + ((layout.scales_size + alignment - 1) / alignment) * alignment;
|
|
270
|
+
layout.total_size = layout.zp_offset + layout.zp_size;
|
|
271
|
+
layout.total_size = std::max(layout.total_size, ggml_nbytes(tensor));
|
|
272
|
+
return layout;
|
|
273
|
+
}
|
|
274
|
+
}
|
|
275
|
+
|
|
276
|
+
// Normal extraction (no requant) - determine format based on tensor type
|
|
277
|
+
layout.is_u4 = false;
|
|
278
|
+
layout.weights_per_block = 32;
|
|
279
|
+
layout.is_symmetric = false;
|
|
280
|
+
|
|
281
|
+
switch (tensor->type) {
|
|
282
|
+
case GGML_TYPE_Q4_0:
|
|
283
|
+
layout.is_u4 = true;
|
|
284
|
+
layout.is_symmetric = true;
|
|
285
|
+
break;
|
|
286
|
+
|
|
287
|
+
case GGML_TYPE_Q4_1:
|
|
288
|
+
case GGML_TYPE_Q4_K:
|
|
289
|
+
layout.is_u4 = true;
|
|
290
|
+
break;
|
|
291
|
+
|
|
292
|
+
case GGML_TYPE_Q8_0:
|
|
293
|
+
layout.is_symmetric = true;
|
|
294
|
+
break;
|
|
295
|
+
|
|
296
|
+
case GGML_TYPE_Q6_K:
|
|
297
|
+
layout.weights_per_block = 16;
|
|
298
|
+
layout.is_symmetric = true;
|
|
299
|
+
break;
|
|
300
|
+
|
|
301
|
+
case GGML_TYPE_Q5_K:
|
|
302
|
+
break;
|
|
303
|
+
|
|
304
|
+
default:
|
|
305
|
+
// Unsupported quantization type
|
|
306
|
+
return layout;
|
|
307
|
+
}
|
|
308
|
+
|
|
309
|
+
// Calculate sizes
|
|
310
|
+
// Weights: U4 = n_elements/2 bytes, U8 = n_elements bytes
|
|
311
|
+
layout.weights_size = layout.is_u4 ? (n_elements / 2) : n_elements;
|
|
312
|
+
|
|
313
|
+
// Scales: F16 per block
|
|
314
|
+
int64_t n_blocks = n_elements / layout.weights_per_block;
|
|
315
|
+
layout.scales_size = n_blocks * sizeof(uint16_t); // F16 = 2 bytes
|
|
316
|
+
// Zero points: U4 or U8 matching weight type
|
|
317
|
+
// For symmetric quantization, we only need one zp value (not one per block)
|
|
318
|
+
size_t n_zp_elements = layout.is_symmetric ? 1 : n_blocks;
|
|
319
|
+
layout.zp_size = layout.is_u4 ? ((n_zp_elements + 1) / 2) : n_zp_elements;
|
|
320
|
+
|
|
321
|
+
// Layout in buffer: [weights | scales | zp] with alignment
|
|
322
|
+
layout.weights_offset = 0;
|
|
323
|
+
layout.scales_offset = ((layout.weights_size + alignment - 1) / alignment) * alignment;
|
|
324
|
+
layout.zp_offset = layout.scales_offset + ((layout.scales_size + alignment - 1) / alignment) * alignment;
|
|
325
|
+
layout.total_size = layout.zp_offset + layout.zp_size;
|
|
326
|
+
layout.total_size = std::max(layout.total_size, ggml_nbytes(tensor));
|
|
327
|
+
|
|
328
|
+
return layout;
|
|
329
|
+
}
|
|
330
|
+
|
|
331
|
+
// Wrap a ggml tensor's data in an ov::Tensor (or a GPU USM remote tensor when
// is_remote is set) and return a heap-allocated extra holding it.
// Returns nullptr when the ggml type has no ov::element counterpart.
ggml_openvino_tensor_extra * ggml_openvino_create_tensor_extra(const ggml_tensor * tensor, bool is_remote) {
    // Build the shape from ne[] in reverse order (ne[GGML_MAX_DIMS-1] .. ne[0])
    ov::Shape dims;
    for (int axis = GGML_MAX_DIMS; axis-- > 0;) {
        dims.push_back(static_cast<size_t>(tensor->ne[axis]));
    }

    // Map the ggml element type to the corresponding OpenVINO element type
    ov::element::Type elem_type;
    switch (tensor->type) {
        case GGML_TYPE_F32:  elem_type = ov::element::f32;  break;
        case GGML_TYPE_F16:  elem_type = ov::element::f16;  break;
        case GGML_TYPE_BF16: elem_type = ov::element::bf16; break;
        case GGML_TYPE_I32:  elem_type = ov::element::i32;  break;
        case GGML_TYPE_I64:  elem_type = ov::element::i64;  break;
        default:
            // GGML_LOG_WARN("%s: unsupported tensor type for ov::Tensor: %s\n", __func__, ggml_type_name(tensor->type));
            return nullptr;
    }

    const auto & device_name = ggml_openvino_get_device_name();
    auto remote_context = ggml_openvino_get_remote_context();

    std::shared_ptr<ov::Tensor> wrapped;
    if (!is_remote) {
        // Plain host-side view over the ggml data pointer
        wrapped = std::make_shared<ov::Tensor>(elem_type, dims, tensor->data);
    } else {
        // Remote (USM) tensors are only created for the GPU device
        GGML_ASSERT(device_name == "GPU");
        auto cl_ctx = remote_context->as<ov::intel_gpu::ocl::ClContext>();
        auto usm = cl_ctx.create_tensor(elem_type, dims, tensor->data);
        wrapped = std::make_shared<ov::intel_gpu::ocl::USMTensor>(std::move(usm));
    }

    return new ggml_openvino_tensor_extra(wrapped);
}
|