whispercpp 1.3.4 → 1.3.6
This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published in that registry.
- checksums.yaml +4 -4
- data/LICENSE +1 -1
- data/README.md +158 -44
- data/ext/extconf.rb +3 -2
- data/ext/ruby_whisper.c +34 -6
- data/ext/ruby_whisper.h +67 -0
- data/ext/ruby_whisper_context.c +236 -144
- data/ext/ruby_whisper_context_params.c +163 -0
- data/ext/ruby_whisper_model.c +12 -13
- data/ext/ruby_whisper_params.c +47 -24
- data/ext/ruby_whisper_segment.c +84 -20
- data/ext/ruby_whisper_token.c +371 -0
- data/ext/ruby_whisper_transcribe.cpp +5 -2
- data/ext/ruby_whisper_vad_context.c +122 -0
- data/ext/ruby_whisper_vad_context_detect.cpp +51 -0
- data/ext/ruby_whisper_vad_params.c +0 -1
- data/ext/ruby_whisper_vad_segment.c +138 -0
- data/ext/ruby_whisper_vad_segments.c +105 -0
- data/ext/sources/CMakeLists.txt +4 -1
- data/ext/sources/bindings/javascript/package.json +1 -1
- data/ext/sources/cmake/arm64-apple-clang.cmake +16 -0
- data/ext/sources/cmake/arm64-windows-llvm.cmake +16 -0
- data/ext/sources/cmake/riscv64-spacemit-linux-gnu-gcc.cmake +29 -0
- data/ext/sources/cmake/whisper-config.cmake.in +5 -40
- data/ext/sources/cmake/x64-windows-llvm.cmake +5 -0
- data/ext/sources/examples/addon.node/vad-example.js +2 -2
- data/ext/sources/examples/bench/bench.cpp +23 -18
- data/ext/sources/examples/cli/cli.cpp +129 -112
- data/ext/sources/examples/common-ggml.cpp +2 -0
- data/ext/sources/examples/lsp/CMakeLists.txt +2 -1
- data/ext/sources/examples/miniaudio.h +4507 -2131
- data/ext/sources/examples/quantize/CMakeLists.txt +2 -1
- data/ext/sources/examples/server/server.cpp +28 -15
- data/ext/sources/examples/talk-llama/CMakeLists.txt +8 -3
- data/ext/sources/examples/talk-llama/llama-adapter.cpp +5 -2
- data/ext/sources/examples/talk-llama/llama-adapter.h +7 -0
- data/ext/sources/examples/talk-llama/llama-arch.cpp +2378 -1988
- data/ext/sources/examples/talk-llama/llama-arch.h +109 -2
- data/ext/sources/examples/talk-llama/llama-batch.cpp +78 -34
- data/ext/sources/examples/talk-llama/llama-batch.h +17 -4
- data/ext/sources/examples/talk-llama/llama-chat.cpp +100 -4
- data/ext/sources/examples/talk-llama/llama-chat.h +5 -0
- data/ext/sources/examples/talk-llama/llama-context.cpp +1088 -403
- data/ext/sources/examples/talk-llama/llama-context.h +70 -23
- data/ext/sources/examples/talk-llama/llama-cparams.h +6 -0
- data/ext/sources/examples/talk-llama/llama-ext.h +12 -0
- data/ext/sources/examples/talk-llama/llama-grammar.cpp +295 -60
- data/ext/sources/examples/talk-llama/llama-grammar.h +22 -1
- data/ext/sources/examples/talk-llama/llama-graph.cpp +925 -155
- data/ext/sources/examples/talk-llama/llama-graph.h +234 -23
- data/ext/sources/examples/talk-llama/llama-hparams.cpp +79 -38
- data/ext/sources/examples/talk-llama/llama-hparams.h +118 -18
- data/ext/sources/examples/talk-llama/llama-impl.cpp +11 -7
- data/ext/sources/examples/talk-llama/llama-impl.h +14 -2
- data/ext/sources/examples/talk-llama/llama-kv-cache-iswa.cpp +8 -4
- data/ext/sources/examples/talk-llama/llama-kv-cache.cpp +405 -140
- data/ext/sources/examples/talk-llama/llama-kv-cache.h +24 -10
- data/ext/sources/examples/talk-llama/llama-kv-cells.h +44 -2
- data/ext/sources/examples/talk-llama/llama-memory-hybrid-iswa.cpp +275 -0
- data/ext/sources/examples/talk-llama/llama-memory-hybrid-iswa.h +140 -0
- data/ext/sources/examples/talk-llama/llama-memory-hybrid.cpp +12 -10
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.cpp +42 -31
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.h +2 -2
- data/ext/sources/examples/talk-llama/llama-mmap.cpp +197 -45
- data/ext/sources/examples/talk-llama/llama-mmap.h +8 -3
- data/ext/sources/examples/talk-llama/llama-model-loader.cpp +606 -116
- data/ext/sources/examples/talk-llama/llama-model-loader.h +41 -5
- data/ext/sources/examples/talk-llama/llama-model-saver.cpp +61 -44
- data/ext/sources/examples/talk-llama/llama-model-saver.h +5 -2
- data/ext/sources/examples/talk-llama/llama-model.cpp +2756 -13643
- data/ext/sources/examples/talk-llama/llama-model.h +112 -18
- data/ext/sources/examples/talk-llama/llama-quant.cpp +582 -365
- data/ext/sources/examples/talk-llama/{llama-sampling.cpp → llama-sampler.cpp} +1409 -199
- data/ext/sources/examples/talk-llama/llama-sampler.h +42 -0
- data/ext/sources/examples/talk-llama/llama-vocab.cpp +248 -82
- data/ext/sources/examples/talk-llama/llama-vocab.h +50 -40
- data/ext/sources/examples/talk-llama/llama.cpp +802 -21
- data/ext/sources/examples/talk-llama/llama.h +210 -39
- data/ext/sources/examples/talk-llama/models/afmoe.cpp +190 -0
- data/ext/sources/examples/talk-llama/models/apertus.cpp +125 -0
- data/ext/sources/examples/talk-llama/models/arcee.cpp +135 -0
- data/ext/sources/examples/talk-llama/models/arctic.cpp +137 -0
- data/ext/sources/examples/talk-llama/models/arwkv7.cpp +86 -0
- data/ext/sources/examples/talk-llama/models/baichuan.cpp +123 -0
- data/ext/sources/examples/talk-llama/models/bailingmoe.cpp +143 -0
- data/ext/sources/examples/talk-llama/models/bailingmoe2.cpp +133 -0
- data/ext/sources/examples/talk-llama/models/bert.cpp +184 -0
- data/ext/sources/examples/talk-llama/models/bitnet.cpp +145 -0
- data/ext/sources/examples/talk-llama/models/bloom.cpp +101 -0
- data/ext/sources/examples/talk-llama/models/chameleon.cpp +178 -0
- data/ext/sources/examples/talk-llama/models/chatglm.cpp +132 -0
- data/ext/sources/examples/talk-llama/models/codeshell.cpp +111 -0
- data/ext/sources/examples/talk-llama/models/cogvlm.cpp +102 -0
- data/ext/sources/examples/talk-llama/models/cohere2-iswa.cpp +134 -0
- data/ext/sources/examples/talk-llama/models/command-r.cpp +122 -0
- data/ext/sources/examples/talk-llama/models/dbrx.cpp +122 -0
- data/ext/sources/examples/talk-llama/models/deci.cpp +135 -0
- data/ext/sources/examples/talk-llama/models/deepseek.cpp +142 -0
- data/ext/sources/examples/talk-llama/models/deepseek2.cpp +262 -0
- data/ext/sources/examples/talk-llama/models/delta-net-base.cpp +445 -0
- data/ext/sources/examples/talk-llama/models/dots1.cpp +132 -0
- data/ext/sources/examples/talk-llama/models/dream.cpp +105 -0
- data/ext/sources/examples/talk-llama/models/ernie4-5-moe.cpp +148 -0
- data/ext/sources/examples/talk-llama/models/ernie4-5.cpp +110 -0
- data/ext/sources/examples/talk-llama/models/eurobert.cpp +97 -0
- data/ext/sources/examples/talk-llama/models/exaone-moe.cpp +145 -0
- data/ext/sources/examples/talk-llama/models/exaone.cpp +114 -0
- data/ext/sources/examples/talk-llama/models/exaone4.cpp +123 -0
- data/ext/sources/examples/talk-llama/models/falcon-h1.cpp +111 -0
- data/ext/sources/examples/talk-llama/models/falcon.cpp +120 -0
- data/ext/sources/examples/talk-llama/models/gemma-embedding.cpp +116 -0
- data/ext/sources/examples/talk-llama/models/gemma.cpp +112 -0
- data/ext/sources/examples/talk-llama/models/gemma2-iswa.cpp +128 -0
- data/ext/sources/examples/talk-llama/models/gemma3.cpp +155 -0
- data/ext/sources/examples/talk-llama/models/gemma3n-iswa.cpp +384 -0
- data/ext/sources/examples/talk-llama/models/glm4-moe.cpp +170 -0
- data/ext/sources/examples/talk-llama/models/glm4.cpp +157 -0
- data/ext/sources/examples/talk-llama/models/gpt2.cpp +105 -0
- data/ext/sources/examples/talk-llama/models/gptneox.cpp +144 -0
- data/ext/sources/examples/talk-llama/models/granite-hybrid.cpp +195 -0
- data/ext/sources/examples/talk-llama/models/granite.cpp +210 -0
- data/ext/sources/examples/talk-llama/models/grok.cpp +159 -0
- data/ext/sources/examples/talk-llama/models/grovemoe.cpp +139 -0
- data/ext/sources/examples/talk-llama/models/hunyuan-dense.cpp +132 -0
- data/ext/sources/examples/talk-llama/models/hunyuan-moe.cpp +153 -0
- data/ext/sources/examples/talk-llama/models/internlm2.cpp +120 -0
- data/ext/sources/examples/talk-llama/models/jais.cpp +86 -0
- data/ext/sources/examples/talk-llama/models/jais2.cpp +123 -0
- data/ext/sources/examples/talk-llama/models/jamba.cpp +106 -0
- data/ext/sources/examples/talk-llama/models/kimi-linear.cpp +381 -0
- data/ext/sources/examples/talk-llama/models/lfm2.cpp +196 -0
- data/ext/sources/examples/talk-llama/models/llada-moe.cpp +122 -0
- data/ext/sources/examples/talk-llama/models/llada.cpp +99 -0
- data/ext/sources/examples/talk-llama/models/llama-iswa.cpp +178 -0
- data/ext/sources/examples/talk-llama/models/llama.cpp +175 -0
- data/ext/sources/examples/talk-llama/models/maincoder.cpp +117 -0
- data/ext/sources/examples/talk-llama/models/mamba-base.cpp +289 -0
- data/ext/sources/examples/talk-llama/models/mamba.cpp +54 -0
- data/ext/sources/examples/talk-llama/models/mimo2-iswa.cpp +129 -0
- data/ext/sources/examples/talk-llama/models/minicpm3.cpp +200 -0
- data/ext/sources/examples/talk-llama/models/minimax-m2.cpp +123 -0
- data/ext/sources/examples/talk-llama/models/mistral3.cpp +160 -0
- data/ext/sources/examples/talk-llama/models/models.h +704 -0
- data/ext/sources/examples/talk-llama/models/modern-bert.cpp +109 -0
- data/ext/sources/examples/talk-llama/models/mpt.cpp +126 -0
- data/ext/sources/examples/talk-llama/models/nemotron-h.cpp +162 -0
- data/ext/sources/examples/talk-llama/models/nemotron.cpp +122 -0
- data/ext/sources/examples/talk-llama/models/neo-bert.cpp +104 -0
- data/ext/sources/examples/talk-llama/models/olmo.cpp +121 -0
- data/ext/sources/examples/talk-llama/models/olmo2.cpp +150 -0
- data/ext/sources/examples/talk-llama/models/olmoe.cpp +124 -0
- data/ext/sources/examples/talk-llama/models/openai-moe-iswa.cpp +127 -0
- data/ext/sources/examples/talk-llama/models/openelm.cpp +124 -0
- data/ext/sources/examples/talk-llama/models/orion.cpp +123 -0
- data/ext/sources/examples/talk-llama/models/paddleocr.cpp +122 -0
- data/ext/sources/examples/talk-llama/models/pangu-embedded.cpp +121 -0
- data/ext/sources/examples/talk-llama/models/phi2.cpp +121 -0
- data/ext/sources/examples/talk-llama/models/phi3.cpp +152 -0
- data/ext/sources/examples/talk-llama/models/plamo.cpp +110 -0
- data/ext/sources/examples/talk-llama/models/plamo2.cpp +320 -0
- data/ext/sources/examples/talk-llama/models/plamo3.cpp +128 -0
- data/ext/sources/examples/talk-llama/models/plm.cpp +169 -0
- data/ext/sources/examples/talk-llama/models/qwen.cpp +108 -0
- data/ext/sources/examples/talk-llama/models/qwen2.cpp +126 -0
- data/ext/sources/examples/talk-llama/models/qwen2moe.cpp +151 -0
- data/ext/sources/examples/talk-llama/models/qwen2vl.cpp +117 -0
- data/ext/sources/examples/talk-llama/models/qwen3.cpp +120 -0
- data/ext/sources/examples/talk-llama/models/qwen35.cpp +381 -0
- data/ext/sources/examples/talk-llama/models/qwen35moe.cpp +422 -0
- data/ext/sources/examples/talk-llama/models/qwen3moe.cpp +131 -0
- data/ext/sources/examples/talk-llama/models/qwen3next.cpp +525 -0
- data/ext/sources/examples/talk-llama/models/qwen3vl-moe.cpp +140 -0
- data/ext/sources/examples/talk-llama/models/qwen3vl.cpp +132 -0
- data/ext/sources/examples/talk-llama/models/refact.cpp +94 -0
- data/ext/sources/examples/talk-llama/models/rnd1.cpp +126 -0
- data/ext/sources/examples/talk-llama/models/rwkv6-base.cpp +164 -0
- data/ext/sources/examples/talk-llama/models/rwkv6.cpp +94 -0
- data/ext/sources/examples/talk-llama/models/rwkv6qwen2.cpp +86 -0
- data/ext/sources/examples/talk-llama/models/rwkv7-base.cpp +137 -0
- data/ext/sources/examples/talk-llama/models/rwkv7.cpp +90 -0
- data/ext/sources/examples/talk-llama/models/seed-oss.cpp +124 -0
- data/ext/sources/examples/talk-llama/models/smallthinker.cpp +126 -0
- data/ext/sources/examples/talk-llama/models/smollm3.cpp +128 -0
- data/ext/sources/examples/talk-llama/models/stablelm.cpp +146 -0
- data/ext/sources/examples/talk-llama/models/starcoder.cpp +100 -0
- data/ext/sources/examples/talk-llama/models/starcoder2.cpp +121 -0
- data/ext/sources/examples/talk-llama/models/step35-iswa.cpp +165 -0
- data/ext/sources/examples/talk-llama/models/t5-dec.cpp +166 -0
- data/ext/sources/examples/talk-llama/models/t5-enc.cpp +96 -0
- data/ext/sources/examples/talk-llama/models/wavtokenizer-dec.cpp +149 -0
- data/ext/sources/examples/talk-llama/models/xverse.cpp +108 -0
- data/ext/sources/examples/talk-llama/unicode.cpp +121 -79
- data/ext/sources/examples/vad-speech-segments/CMakeLists.txt +1 -1
- data/ext/sources/examples/whisper.wasm/index-tmpl.html +1 -1
- data/ext/sources/ggml/CMakeLists.txt +90 -56
- data/ext/sources/ggml/include/ggml-alloc.h +9 -0
- data/ext/sources/ggml/include/ggml-backend.h +5 -2
- data/ext/sources/ggml/include/ggml-cann.h +1 -1
- data/ext/sources/ggml/include/ggml-cpu.h +6 -0
- data/ext/sources/ggml/include/ggml-hexagon.h +19 -0
- data/ext/sources/ggml/include/ggml-openvino.h +37 -0
- data/ext/sources/ggml/include/ggml-opt.h +1 -1
- data/ext/sources/ggml/include/ggml-rpc.h +14 -12
- data/ext/sources/ggml/include/ggml-virtgpu.h +14 -0
- data/ext/sources/ggml/include/ggml-zendnn.h +22 -0
- data/ext/sources/ggml/include/ggml.h +246 -21
- data/ext/sources/ggml/src/CMakeLists.txt +85 -11
- data/ext/sources/ggml/src/ggml-alloc.c +128 -50
- data/ext/sources/ggml/src/ggml-backend-dl.cpp +48 -0
- data/ext/sources/ggml/src/ggml-backend-dl.h +45 -0
- data/ext/sources/ggml/src/ggml-backend-impl.h +1 -4
- data/ext/sources/ggml/src/ggml-backend-reg.cpp +54 -88
- data/ext/sources/ggml/src/ggml-backend.cpp +76 -23
- data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +18 -4
- data/ext/sources/ggml/src/ggml-blas/ggml-blas.cpp +11 -11
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.cpp +58 -46
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.h +139 -48
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.cpp +2427 -1785
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +238 -362
- data/ext/sources/ggml/src/ggml-cann/common.h +285 -211
- data/ext/sources/ggml/src/ggml-cann/ggml-cann.cpp +663 -831
- data/ext/sources/ggml/src/ggml-common.h +11 -0
- data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +170 -95
- data/ext/sources/ggml/src/ggml-cpu/amx/amx.cpp +42 -18
- data/ext/sources/ggml/src/ggml-cpu/amx/common.h +34 -10
- data/ext/sources/ggml/src/ggml-cpu/amx/mmq.cpp +85 -85
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp +4 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/quants.c +513 -27
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/repack.cpp +4192 -992
- data/ext/sources/ggml/src/ggml-cpu/arch/loongarch/quants.c +4 -5
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/cpu-feats.cpp +38 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/quants.c +1761 -49
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/repack.cpp +1391 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/s390/cpu-feats.cpp +50 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/s390/quants.c +8 -10
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/quants.c +9 -9
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/repack.cpp +124 -24
- data/ext/sources/ggml/src/ggml-cpu/arch-fallback.h +157 -28
- data/ext/sources/ggml/src/ggml-cpu/binary-ops.cpp +2 -6
- data/ext/sources/ggml/src/ggml-cpu/common.h +8 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-impl.h +8 -3
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +251 -80
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.cpp +19 -0
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +587 -119
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.h +33 -44
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +1093 -194
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +1284 -203
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.h +6 -0
- data/ext/sources/ggml/src/ggml-cpu/ops.cpp +1519 -527
- data/ext/sources/ggml/src/ggml-cpu/ops.h +6 -4
- data/ext/sources/ggml/src/ggml-cpu/quants.c +40 -0
- data/ext/sources/ggml/src/ggml-cpu/quants.h +3 -0
- data/ext/sources/ggml/src/ggml-cpu/repack.cpp +3632 -781
- data/ext/sources/ggml/src/ggml-cpu/repack.h +129 -4
- data/ext/sources/ggml/src/ggml-cpu/simd-gemm.h +136 -0
- data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +152 -46
- data/ext/sources/ggml/src/ggml-cpu/spacemit/ime.cpp +3 -2
- data/ext/sources/ggml/src/ggml-cpu/unary-ops.cpp +152 -1
- data/ext/sources/ggml/src/ggml-cpu/unary-ops.h +7 -0
- data/ext/sources/ggml/src/ggml-cpu/vec.cpp +140 -0
- data/ext/sources/ggml/src/ggml-cpu/vec.h +261 -146
- data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +72 -1
- data/ext/sources/ggml/src/ggml-cuda/argmax.cu +2 -2
- data/ext/sources/ggml/src/ggml-cuda/argsort.cu +132 -6
- data/ext/sources/ggml/src/ggml-cuda/argsort.cuh +16 -0
- data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +33 -31
- data/ext/sources/ggml/src/ggml-cuda/common.cuh +474 -85
- data/ext/sources/ggml/src/ggml-cuda/convert.cu +41 -27
- data/ext/sources/ggml/src/ggml-cuda/convert.cuh +10 -0
- data/ext/sources/ggml/src/ggml-cuda/cpy-utils.cuh +1 -1
- data/ext/sources/ggml/src/ggml-cuda/cpy.cu +342 -246
- data/ext/sources/ggml/src/ggml-cuda/cpy.cuh +1 -5
- data/ext/sources/ggml/src/ggml-cuda/cumsum.cu +307 -0
- data/ext/sources/ggml/src/ggml-cuda/cumsum.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/diag.cu +77 -0
- data/ext/sources/ggml/src/ggml-cuda/diag.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +98 -74
- data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +973 -665
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cu +35 -741
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cuh +1255 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec.cuh +33 -40
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +40 -18
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +48 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn.cu +206 -45
- data/ext/sources/ggml/src/ggml-cuda/fill.cu +37 -0
- data/ext/sources/ggml/src/ggml-cuda/fill.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/gated_delta_net.cu +263 -0
- data/ext/sources/ggml/src/ggml-cuda/gated_delta_net.cuh +4 -0
- data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +1688 -302
- data/ext/sources/ggml/src/ggml-cuda/mean.cu +12 -10
- data/ext/sources/ggml/src/ggml-cuda/mma.cuh +908 -48
- data/ext/sources/ggml/src/ggml-cuda/mmf.cu +88 -20
- data/ext/sources/ggml/src/ggml-cuda/mmf.cuh +502 -90
- data/ext/sources/ggml/src/ggml-cuda/mmid.cu +164 -0
- data/ext/sources/ggml/src/ggml-cuda/mmid.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/mmq.cu +69 -176
- data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +532 -193
- data/ext/sources/ggml/src/ggml-cuda/mmvf.cu +460 -104
- data/ext/sources/ggml/src/ggml-cuda/mmvf.cuh +5 -2
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +360 -122
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cuh +2 -1
- data/ext/sources/ggml/src/ggml-cuda/norm.cu +18 -76
- data/ext/sources/ggml/src/ggml-cuda/pad.cu +73 -39
- data/ext/sources/ggml/src/ggml-cuda/quantize.cu +152 -1
- data/ext/sources/ggml/src/ggml-cuda/quantize.cuh +14 -0
- data/ext/sources/ggml/src/ggml-cuda/reduce_rows.cuh +2 -16
- data/ext/sources/ggml/src/ggml-cuda/rope.cu +364 -149
- data/ext/sources/ggml/src/ggml-cuda/rope.cuh +2 -0
- data/ext/sources/ggml/src/ggml-cuda/set-rows.cu +101 -47
- data/ext/sources/ggml/src/ggml-cuda/set.cu +39 -0
- data/ext/sources/ggml/src/ggml-cuda/set.cuh +7 -0
- data/ext/sources/ggml/src/ggml-cuda/softmax.cu +163 -41
- data/ext/sources/ggml/src/ggml-cuda/solve_tri.cu +275 -0
- data/ext/sources/ggml/src/ggml-cuda/solve_tri.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +68 -50
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cuh +1 -1
- data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cu +49 -84
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_32.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_32.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq112-dv112.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq128-dv128.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq256-dv256.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq40-dv40.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq576-dv512.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq64-dv64.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq72-dv72.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq80-dv80.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-tile-instance-dkq96-dv96.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +22 -4
- data/ext/sources/ggml/src/ggml-cuda/top-k.cu +95 -0
- data/ext/sources/ggml/src/ggml-cuda/top-k.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cu +275 -119
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cuh +20 -7
- data/ext/sources/ggml/src/ggml-cuda/tri.cu +136 -0
- data/ext/sources/ggml/src/ggml-cuda/tri.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/unary.cu +160 -11
- data/ext/sources/ggml/src/ggml-cuda/unary.cuh +38 -0
- data/ext/sources/ggml/src/ggml-cuda/upscale.cu +163 -7
- data/ext/sources/ggml/src/ggml-cuda/vecdotq.cuh +31 -17
- data/ext/sources/ggml/src/ggml-cuda/vendors/cuda.h +4 -0
- data/ext/sources/ggml/src/ggml-cuda/vendors/hip.h +22 -1
- data/ext/sources/ggml/src/ggml-cuda/vendors/musa.h +6 -0
- data/ext/sources/ggml/src/ggml-hexagon/CMakeLists.txt +117 -0
- data/ext/sources/ggml/src/ggml-hexagon/ggml-hexagon.cpp +3325 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/CMakeLists.txt +46 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/act-ops.c +813 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/argsort-ops.c +281 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/binary-ops.c +891 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/cmake-toolchain.cmake +157 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/cpy-ops.c +252 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/flash-attn-ops.c +713 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/get-rows-ops.c +112 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-dma.c +63 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-dma.h +182 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-dump.h +77 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-fastdiv.h +37 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-utils.h +51 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-ctx.h +35 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-msg.h +155 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-ops.h +63 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp_iface.idl +16 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-arith.h +443 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-base.h +240 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-copy.h +245 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-div.h +251 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-dump.h +129 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-exp.h +215 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-floor.h +100 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-inverse.h +210 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-reduce.h +296 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-scale.h +133 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sigmoid.h +141 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sqrt.h +126 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-types.h +36 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-utils.h +26 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/main.c +1199 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/matmul-ops.c +2670 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/rope-ops.c +497 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/set-rows-ops.c +168 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/softmax-ops.c +419 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/ssm-conv.c +339 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/sum-rows-ops.c +128 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/unary-ops.c +382 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/worker-pool.c +293 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/worker-pool.h +57 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp-drv.cpp +418 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp-drv.h +121 -0
- data/ext/sources/ggml/src/ggml-hexagon/libdl.h +79 -0
- data/ext/sources/ggml/src/ggml-hexagon/libggml-htp.inf +38 -0
- data/ext/sources/ggml/src/ggml-hexagon/op-desc.h +153 -0
- data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +14 -13
- data/ext/sources/ggml/src/ggml-impl.h +129 -6
- data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +10 -10
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-common.cpp +15 -4
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.h +8 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.m +173 -34
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.cpp +912 -344
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.h +124 -59
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.m +588 -144
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +396 -23
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.cpp +1724 -421
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.h +16 -3
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.cpp +333 -114
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.metal +3050 -1539
- data/ext/sources/ggml/src/ggml-musa/CMakeLists.txt +3 -1
- data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +30 -1
- data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +4279 -497
- data/ext/sources/ggml/src/ggml-opencl/kernels/concat.cl +41 -99
- data/ext/sources/ggml/src/ggml-opencl/kernels/cpy.cl +45 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cumsum.cl +139 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +267 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/diag.cl +27 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/exp.cl +125 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/expm1.cl +113 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/fill.cl +17 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/flash_attn_f32.cl +4 -3
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemm_moe_mxfp4_f32.cl +162 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemm_noshuffle_q4_1_f32.cl +132 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_moe_mxfp4_f32.cl +156 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_general_q8_0_f32.cl +195 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_q4_1_f32.cl +283 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/get_rows.cl +36 -12
- data/ext/sources/ggml/src/ggml-opencl/kernels/l2_norm.cl +71 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mean.cl +140 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_f16_f32_kq_kqv.cl +273 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_f16_f32_l4_lm.cl +24 -10
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_f32_f32_l4_lm.cl +24 -10
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q4_0_f32_l4_lm.cl +163 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q4_1_f32_l4_lm.cl +165 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q6_k_f32_l4_lm.cl +158 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q8_0_f32_8x4.cl +129 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q8_0_f32_l4_lm.cl +154 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_1_f32.cl +219 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_1_f32_flat.cl +229 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_k_f32.cl +180 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/{mul_mv_q6_k.cl → mul_mv_q6_k_f32.cl} +4 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q6_k_f32_flat.cl +194 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/neg.cl +125 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/pad.cl +29 -20
- data/ext/sources/ggml/src/ggml-opencl/kernels/repeat.cl +31 -32
- data/ext/sources/ggml/src/ggml-opencl/kernels/rms_norm.cl +25 -10
- data/ext/sources/ggml/src/ggml-opencl/kernels/rope.cl +50 -24
- data/ext/sources/ggml/src/ggml-opencl/kernels/scale.cl +14 -4
- data/ext/sources/ggml/src/ggml-opencl/kernels/set_rows.cl +35 -16
- data/ext/sources/ggml/src/ggml-opencl/kernels/softplus.cl +116 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/solve_tri.cl +51 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sqr.cl +53 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sqrt.cl +53 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/ssm_conv.cl +77 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sum_rows.cl +114 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/tanh.cl +94 -48
- data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +39 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/tri.cl +32 -0
- data/ext/sources/ggml/src/ggml-openvino/.clang-format +154 -0
- data/ext/sources/ggml/src/ggml-openvino/CMakeLists.txt +22 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-decoder.cpp +975 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-decoder.h +294 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino-extra.cpp +373 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino-extra.h +182 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino.cpp +1110 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-quants.cpp +884 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-quants.h +153 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/decoder.h +74 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/frontend.cpp +27 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/frontend.h +23 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/input_model.cpp +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/input_model.h +29 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/node_context.h +112 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/cont.cpp +48 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/cpy.cpp +21 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/flash_attn_ext.cpp +90 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/get_rows.cpp +69 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/glu_geglu.cpp +61 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/glu_swiglu.cpp +62 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/mulmat.cpp +90 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/permute.cpp +102 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/reshape.cpp +83 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/rms_norm.cpp +46 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/rope.cpp +123 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/scale.cpp +41 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/set_rows.cpp +76 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/softmax.cpp +89 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/transpose.cpp +23 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/unary_silu.cpp +27 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/view.cpp +53 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op_table.cpp +46 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op_table.h +39 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/eliminate_zp.cpp +123 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/eliminate_zp.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/fuse_to_sdpa.cpp +60 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/fuse_to_sdpa.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/mark_decompression_convert_constant_folding.h +29 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/squeeze_matmul.cpp +58 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/squeeze_matmul.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/translate_session.cpp +293 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/translate_session.h +28 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/utils.cpp +226 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/utils.h +85 -0
- data/ext/sources/ggml/src/ggml-openvino/utils.cpp +823 -0
- data/ext/sources/ggml/src/ggml-openvino/utils.h +123 -0
- data/ext/sources/ggml/src/ggml-quants.c +96 -5
- data/ext/sources/ggml/src/ggml-quants.h +3 -0
- data/ext/sources/ggml/src/ggml-rpc/ggml-rpc.cpp +438 -156
- data/ext/sources/ggml/src/ggml-sycl/CMakeLists.txt +59 -87
- data/ext/sources/ggml/src/ggml-sycl/add-id.cpp +81 -0
- data/ext/sources/ggml/src/ggml-sycl/add-id.hpp +8 -0
- data/ext/sources/ggml/src/ggml-sycl/backend.hpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +21 -29
- data/ext/sources/ggml/src/ggml-sycl/binbcast.hpp +0 -6
- data/ext/sources/ggml/src/ggml-sycl/common.hpp +427 -20
- data/ext/sources/ggml/src/ggml-sycl/concat.cpp +55 -44
- data/ext/sources/ggml/src/ggml-sycl/convert.cpp +103 -1
- data/ext/sources/ggml/src/ggml-sycl/convert.hpp +22 -1
- data/ext/sources/ggml/src/ggml-sycl/count-equal.cpp +79 -0
- data/ext/sources/ggml/src/ggml-sycl/count-equal.hpp +9 -0
- data/ext/sources/ggml/src/ggml-sycl/cpy.cpp +0 -3
- data/ext/sources/ggml/src/ggml-sycl/dequantize.hpp +18 -0
- data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +867 -50
- data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +401 -358
- data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +12 -2
- data/ext/sources/ggml/src/ggml-sycl/fattn-common.hpp +1179 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-tile.cpp +55 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-tile.hpp +1338 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-vec.hpp +667 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn.cpp +225 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn.hpp +22 -0
- data/ext/sources/ggml/src/ggml-sycl/gated_delta_net.cpp +309 -0
- data/ext/sources/ggml/src/ggml-sycl/gated_delta_net.hpp +8 -0
- data/ext/sources/ggml/src/ggml-sycl/ggml-sycl.cpp +645 -155
- data/ext/sources/ggml/src/ggml-sycl/mmvq.cpp +22 -0
- data/ext/sources/ggml/src/ggml-sycl/norm.cpp +221 -66
- data/ext/sources/ggml/src/ggml-sycl/norm.hpp +2 -0
- data/ext/sources/ggml/src/ggml-sycl/outprod.cpp +3 -3
- data/ext/sources/ggml/src/ggml-sycl/pad.cpp +97 -0
- data/ext/sources/ggml/src/ggml-sycl/pad.hpp +24 -0
- data/ext/sources/ggml/src/ggml-sycl/pad_reflect_1d.cpp +100 -0
- data/ext/sources/ggml/src/ggml-sycl/pad_reflect_1d.hpp +10 -0
- data/ext/sources/ggml/src/ggml-sycl/presets.hpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/quants.hpp +1 -1
- data/ext/sources/ggml/src/ggml-sycl/repeat_back.cpp +76 -0
- data/ext/sources/ggml/src/ggml-sycl/repeat_back.hpp +8 -0
- data/ext/sources/ggml/src/ggml-sycl/roll.cpp +122 -0
- data/ext/sources/ggml/src/ggml-sycl/roll.hpp +20 -0
- data/ext/sources/ggml/src/ggml-sycl/rope.cpp +457 -281
- data/ext/sources/ggml/src/ggml-sycl/rope.hpp +6 -0
- data/ext/sources/ggml/src/ggml-sycl/set.cpp +73 -0
- data/ext/sources/ggml/src/ggml-sycl/set.hpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/softmax.cpp +327 -162
- data/ext/sources/ggml/src/ggml-sycl/softmax.hpp +4 -0
- data/ext/sources/ggml/src/ggml-sycl/ssm_conv.cpp +127 -0
- data/ext/sources/ggml/src/ggml-sycl/ssm_conv.hpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq112-dv112.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq128-dv128.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq256-dv256.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq40-dv40.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq576-dv512.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq64-dv64.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq72-dv72.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq80-dv80.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq96-dv96.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +71 -0
- data/ext/sources/ggml/src/ggml-sycl/wkv.cpp +1 -1
- data/ext/sources/ggml/src/ggml-virtgpu/CMakeLists.txt +70 -0
- data/ext/sources/ggml/src/ggml-virtgpu/apir_cs_ggml-rpc-front.cpp +87 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/CMakeLists.txt +21 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/apir_cs_ggml-rpc-back.cpp +115 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-convert.h +13 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-backend.cpp +102 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-buffer-type.cpp +105 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-buffer.cpp +179 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-device.cpp +148 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.cpp +51 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.gen.h +73 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.h +27 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-virgl-apir.h +32 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend.cpp +144 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/api_remoting.h +95 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_backend.gen.h +94 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_backend.h +50 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs.h +378 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs_ggml.h +232 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs_rpc.h +58 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-buffer-type.cpp +81 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-buffer.cpp +119 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-device.cpp +158 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-reg.cpp +213 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend.cpp +69 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-remoting.h +71 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggmlremoting_functions.yaml +166 -0
- data/ext/sources/ggml/src/ggml-virtgpu/include/apir_hw.h +9 -0
- data/ext/sources/ggml/src/ggml-virtgpu/regenerate_remoting.py +333 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-apir.h +15 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-backend.cpp +58 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-buffer-type.cpp +110 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-buffer.cpp +173 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-device.cpp +192 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-impl.h +36 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward.gen.h +53 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-shm.cpp +98 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-shm.h +23 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-utils.cpp +179 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-utils.h +86 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu.cpp +544 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu.h +117 -0
- data/ext/sources/ggml/src/ggml-vulkan/CMakeLists.txt +39 -19
- data/ext/sources/ggml/src/ggml-vulkan/ggml-vulkan.cpp +5994 -3055
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/abs.comp +21 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +18 -10
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add1.comp +28 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add_id.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/arange.comp +20 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argmax.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp +33 -26
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argsort_large.comp +114 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/ceil.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp +47 -49
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv_transpose_1d.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +4 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_transpose.comp +67 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/count_equal.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/count_experts.comp +51 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cumsum.comp +83 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cumsum_multipass1.comp +60 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cumsum_multipass2.comp +66 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_f32.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{dequant_funcs.comp → dequant_funcs.glsl} +9 -21
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{dequant_funcs_cm2.comp → dequant_funcs_cm2.glsl} +18 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{dequant_head.comp → dequant_head.glsl} +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_m.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_s.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xs.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_xs.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_mxfp4.comp +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_0.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_1.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_0.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_1.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q8_0.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/diag.comp +29 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/diag_mask_inf.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/div.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/elu.comp +27 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/exp.comp +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/fill.comp +19 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +386 -160
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{flash_attn_base.comp → flash_attn_base.glsl} +82 -20
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +400 -174
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +123 -37
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_mask_opt.comp +162 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +10 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/floor.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gated_delta_net.comp +128 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu_erf.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu_quick.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu_erf.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu_quick.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{generic_binary_head.comp → generic_binary_head.glsl} +17 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{generic_head.comp → generic_head.glsl} +2 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{generic_unary_head.comp → generic_unary_head.glsl} +7 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp +4 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{glu_head.comp → glu_head.glsl} +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/hardsigmoid.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/hardswish.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +19 -7
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col_3d.comp +2 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +13 -10
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/leaky_relu.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/log.comp +18 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{mul_mat_vec_base.comp → mul_mat_vec_base.glsl} +77 -29
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iface.glsl +35 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_m.comp +71 -21
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp +41 -25
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_s.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xs.comp +44 -26
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xxs.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_s.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_xxs.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +9 -7
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp +9 -7
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +4 -6
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +4 -6
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +4 -6
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vecq.comp +39 -36
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vecq_funcs.glsl +494 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +88 -105
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +41 -26
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{mul_mm_funcs.comp → mul_mm_funcs.glsl} +69 -59
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_id_funcs.glsl +74 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +92 -230
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.glsl +454 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_shmem_types.glsl +78 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/multi_add.comp +97 -13
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/neg.comp +20 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/norm.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_adamw.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_sgd.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +21 -6
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pool2d.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/quantize_q8_1.comp +10 -10
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/reglu.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/relu.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat_back.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +49 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_back.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_partials.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/roll.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_funcs.glsl +207 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.glsl +20 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +8 -49
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +8 -32
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +8 -32
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_params.glsl +33 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_vision.comp +8 -38
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/round.comp +29 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sgn.comp +21 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sigmoid.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu_back.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_back.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_large1.comp +62 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_large2.comp +79 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_large3.comp +65 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_large_common.glsl +53 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/softplus.comp +23 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/solve_tri.comp +81 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sqrt.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/square.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/ssm_conv.comp +50 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/ssm_scan.comp +124 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/step.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sub.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp +2 -25
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.glsl +25 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/swiglu.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/swiglu_oai.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/topk_argsort.comp +118 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/topk_moe.comp +213 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/topk_nary_search.comp +246 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/tri.comp +43 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/trunc.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{types.comp → types.glsl} +345 -26
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +90 -12
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +384 -180
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/xielu.comp +35 -0
- data/ext/sources/ggml/src/ggml-webgpu/CMakeLists.txt +28 -2
- data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu-shader-lib.hpp +1374 -0
- data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu.cpp +2544 -726
- data/ext/sources/ggml/src/ggml-webgpu/pre_wgsl.hpp +778 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argmax.wgsl +72 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argsort.wgsl +106 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argsort_merge.wgsl +134 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/binary.wgsl +141 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/common_decls.tmpl +65 -72
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/concat.wgsl +75 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cpy.tmpl.wgsl +107 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cumsum.wgsl +66 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/embed_wgsl.py +73 -15
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/flash_attn.wgsl +636 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{get_rows.tmpl.wgsl → get_rows.wgsl} +53 -259
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/glu.tmpl.wgsl +323 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{mul_mat.tmpl.wgsl → mul_mat.wgsl} +72 -261
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_decls.tmpl +766 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_reg_tile.wgsl +147 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_subgroup_matrix.wgsl +196 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_vec.wgsl +480 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/pad.wgsl +86 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/repeat.wgsl +67 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm.wgsl +83 -17
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/rope.tmpl.wgsl +295 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/scale.wgsl +63 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/set_rows.wgsl +40 -12
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/soft_max.tmpl.wgsl +345 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/sum_rows.wgsl +55 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/unary.wgsl +193 -0
- data/ext/sources/ggml/src/ggml-zdnn/ggml-zdnn.cpp +6 -1
- data/ext/sources/ggml/src/ggml-zendnn/CMakeLists.txt +91 -0
- data/ext/sources/ggml/src/ggml-zendnn/ggml-zendnn.cpp +469 -0
- data/ext/sources/ggml/src/ggml.c +590 -64
- data/ext/sources/ggml/src/gguf.cpp +229 -44
- data/ext/sources/include/whisper.h +1 -0
- data/ext/sources/src/CMakeLists.txt +3 -1
- data/ext/sources/src/whisper.cpp +106 -62
- data/ext/sources/tests/CMakeLists.txt +2 -2
- data/ext/sources/tests/test-vad-full.cpp +4 -2
- data/ext/sources/tests/test-vad.cpp +1 -1
- data/extsources.rb +1 -0
- data/lib/whisper/model/uri.rb +17 -18
- data/sig/whisper.rbs +162 -4
- data/test/test_context_params.rb +82 -0
- data/test/test_params.rb +16 -8
- data/test/test_segment.rb +0 -1
- data/test/test_token.rb +81 -0
- data/test/test_vad.rb +1 -1
- data/test/test_vad_context.rb +100 -0
- data/test/test_vad_segment.rb +19 -0
- data/test/test_vad_segments.rb +16 -0
- data/test/test_whisper.rb +27 -0
- data/whispercpp.gemspec +1 -1
- metadata +502 -37
- data/ext/sources/build-xcframework.sh +0 -571
- data/ext/sources/examples/talk-llama/llama-sampling.h +0 -32
- data/ext/sources/ggml/cmake/BuildTypes.cmake +0 -54
- data/ext/sources/ggml/src/ggml-cann/Doxyfile +0 -2579
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.comp +0 -105
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp +0 -55
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/add.tmpl.wgsl +0 -44
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/add_in_place.tmpl.wgsl +0 -41
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/binary_head.tmpl +0 -45
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cpy.wgsl +0 -60
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul.tmpl.wgsl +0 -44
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_in_place.tmpl.wgsl +0 -41
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm_in_place.wgsl +0 -48
- /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{test_bfloat16_support.comp → feature-tests/bfloat16.comp} +0 -0
- /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{test_coopmat_support.comp → feature-tests/coopmat.comp} +0 -0
- /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{test_coopmat2_support.comp → feature-tests/coopmat2.comp} +0 -0
- /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{test_integer_dot_support.comp → feature-tests/integer_dot.comp} +0 -0
- /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{glu_main.comp → glu_main.glsl} +0 -0
- /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{rte.comp → rte.glsl} +0 -0
- /data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/{utils.comp → utils.glsl} +0 -0
@@ -0,0 +1,884 @@
+#include "ggml-quants.h"
+
+#include "ggml-common.h"
+#include "ggml-impl.h"
+#include "ggml.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <memory>
+#include <openvino/core/except.hpp>
+#include <openvino/core/node.hpp>
+#include <openvino/core/node_output.hpp>
+#include <openvino/core/parallel.hpp>
+#include <openvino/core/shape.hpp>
+#include <openvino/core/type/element_type.hpp>
+#include <openvino/core/type/element_type_traits.hpp>
+#include <openvino/core/type/float16.hpp>
+#include <openvino/op/add.hpp>
+#include <openvino/op/constant.hpp>
+#include <openvino/op/convert.hpp>
+#include <openvino/op/multiply.hpp>
+#include <openvino/op/reshape.hpp>
+#include <openvino/op/subtract.hpp>
+#include <openvino/op/util/attr_types.hpp>
+#include <openvino/runtime/tensor.hpp>
+#include <string>
+#include <vector>
+
+void unpack_32_4(const uint8_t * data, uint8_t * dst) {
+    std::fill_n(dst, 16, 0);
+    for (int j = 0; j < 16; ++j) {
+        uint8_t x = (data[j] & 0x0F);
+        uint8_t y = (data[j] >> 4);
+        if (j % 2 != 0) {
+            x <<= 4;
+            y <<= 4;
+        }
+        dst[j / 2] |= x;
+        dst[8 + j / 2] |= y; // Last 16 weights are in the higher bits
+    }
+}
+
+// Extracts (weight, scales, zp) from Q4_0 tensors.
+// Data layout is: |16 bit scale|32 x 4bit weights|.
+void extract_q4_0_data(const ggml_tensor * tensor,
+                       ov::Tensor & weights_arr,
+                       ov::Tensor & scales_arr,
+                       ov::Tensor & zp_arr) {
+    const uint64_t bytes_per_block = 18; // 2 bytes scale, 32x0.5 byte weights
+
+    auto * data = static_cast<uint8_t *>(tensor->data);
+    auto * weights = static_cast<uint8_t *>(weights_arr.data());
+    auto * scales = scales_arr.data<ov::element_type_traits<ov::element::f16>::value_type>();
+    auto * zp = static_cast<uint8_t *>(zp_arr.data());
+
+    bool is_scalar_zp = (zp_arr.get_size() == 1); // Symmetric quantization
+
+    // For Q4_0, zero point is always 8
+    if (is_scalar_zp) {
+        zp[0] = 8 | (8 << 4); // Pack two 4-bit values
+    }
+
+    ov::parallel_for(scales_arr.get_size(), [&](size_t i) {
+        scales[i] = ov::float16::from_bits(*((uint16_t *) (data + i * bytes_per_block)));
+        // For asymmetric quantization, compute per-block zero points
+        if (!is_scalar_zp) {
+            // Pack two 4-bit zero points per byte
+            if (i % 2 == 0) {
+                zp[i / 2] = 8; // Lower nibble
+            } else {
+                zp[i / 2] |= (8 << 4); // Upper nibble
+            }
+        }
+        unpack_32_4(data + i * bytes_per_block + 2, weights + i * 16);
+    });
+}
+
+// Extracts (weight, scales, zp) from Q4_1 tensors.
+// Data layout is: |16 bit scale|16 bit min|32 x 4bit weights|.
+void extract_q4_1_data(const ggml_tensor * tensor,
+                       ov::Tensor & weights_arr,
+                       ov::Tensor & scales_arr,
+                       ov::Tensor & zp_arr,
+                       bool use_bias) {
+    const uint64_t bytes_per_block = 20; // 2 bytes scale, 2 bytes min, 32x0.5 byte weights
+
+    auto * data = static_cast<uint8_t *>(tensor->data);
+    auto * weights = static_cast<uint8_t *>(weights_arr.data());
+    auto * scales = scales_arr.data<ov::element_type_traits<ov::element::f16>::value_type>();
+
+    if (use_bias) {
+        // Store bias (min) directly as f16 instead of computing u4 zero points
+        auto * bias = zp_arr.data<ov::element_type_traits<ov::element::f16>::value_type>();
+        ov::parallel_for(scales_arr.get_size(), [&](size_t i) {
+            float scale = static_cast<float>(ov::float16::from_bits(*((uint16_t *) (data + i * bytes_per_block))));
+            float min = static_cast<float>(ov::float16::from_bits(*((uint16_t *) (data + i * bytes_per_block + 2))));
+            scales[i] = ov::float16(scale);
+            bias[i] = ov::float16(min); // bias = min, dequant: w*s + bias
+            unpack_32_4(data + i * bytes_per_block + 4, weights + i * 16);
+        });
+    } else {
+        auto * zp = static_cast<uint8_t *>(zp_arr.data());
+        ov::parallel_for(scales_arr.get_size(), [&](size_t i) {
+            float scale = static_cast<float>(ov::float16::from_bits(*((uint16_t *) (data + i * bytes_per_block))));
+            float min = static_cast<float>(ov::float16::from_bits(*((uint16_t *) (data + i * bytes_per_block + 2))));
+            scales[i] = ov::float16(scale);
+            // zp = -min / scale (bias = min, so zp = -bias/scale)
+            uint8_t zp_val = (scale != 0.0f) ? (uint8_t) std::round(-min / scale) : 0;
+            // Pack two 4-bit zero points per byte
+            if (i % 2 == 0) {
+                zp[i / 2] = zp_val & 0x0F; // Lower nibble
+            } else {
+                zp[i / 2] |= (zp_val << 4); // Upper nibble
+            }
+            unpack_32_4(data + i * bytes_per_block + 4, weights + i * 16);
+        });
+    }
+}
+
+// Extracts (weight, scales, zp) from Q8_0 tensors.
+// Data layout is: |16 bit scale|32 x 8bit weights|.
+void extract_q8_0_data(const ggml_tensor * tensor,
+                       ov::Tensor & weights_arr,
+                       ov::Tensor & scales_arr,
+                       ov::Tensor & zp_arr) {
+    const uint64_t weights_per_block = 32;
+    const uint64_t bytes_per_block = 34; // 2 bytes scale, 32x1 byte weights
+
+    auto * data = static_cast<uint8_t *>(tensor->data);
+    auto * weights = static_cast<uint8_t *>(weights_arr.data());
+    auto * scales = scales_arr.data<ov::element_type_traits<ov::element::f16>::value_type>();
+    auto * zp = static_cast<uint8_t *>(zp_arr.data());
+
+    bool is_scalar_zp = (zp_arr.get_size() == 1); // Symmetric quantization
+
+    // For Q8_0, zero point is always 128
+    if (is_scalar_zp) {
+        zp[0] = 128;
+    }
+
+    ov::parallel_for(scales_arr.get_size(), [&](size_t i) {
+        uint8_t * block_data = data + i * bytes_per_block;
+        scales[i] = ov::float16::from_bits(*(uint16_t *) block_data);
+        // For asymmetric quantization, store per-block zero points
+        if (!is_scalar_zp) {
+            zp[i] = 128;
+        }
+        for (size_t j = 0; j < weights_per_block; ++j) {
+            uint8_t x = block_data[j + 2]; // j+2 to skip the scale bytes.
+            // Original data is in int8_t, so we add a bias of -128 and invert the first bit.
+            x ^= 1 << 7;
+            weights[i * weights_per_block + j] = x;
+        }
+    });
+}
+
+void unpack_256_4(const uint8_t * data, uint8_t * dst) {
+    // Initialize the output array with zeros
+    std::fill_n(dst, 128, 0);
+
+    for (size_t i = 0; i < 4; ++i) {
+        for (int j = 0; j < 32; ++j) {
+            uint8_t x = (data[i * 32 + j] & 0x0F);
+            uint8_t y = (data[i * 32 + j] >> 4);
+            if (j % 2 != 0) {
+                x <<= 4;
+                y <<= 4;
+            }
+            dst[i * 32 + j / 2] |= x;
+            dst[i * 32 + 16 + j / 2] |= y; // Last 16 weights are in the higher bits
+        }
+    }
+}
+
+void extract_q4_k_data(const ggml_tensor * tensor,
+                       ov::Tensor & weights_arr,
+                       ov::Tensor & scales_arr,
+                       ov::Tensor & zp_arr,
+                       bool use_bias) {
+    const uint64_t bytes_per_block = 2 + 2 + 12 + 128;
+    const uint64_t n_super_block = tensor->nb[3] / bytes_per_block;
+
+    auto * data = static_cast<uint8_t *>(tensor->data);
+    auto * weights = static_cast<uint8_t *>(weights_arr.data());
+    auto * scales = scales_arr.data<ov::element_type_traits<ov::element::f16>::value_type>();
+
+    // For bias path, zp_arr holds f16 bias values; for zp path, it holds packed u4 zero points
+    auto * zp_u4 = use_bias ? nullptr : static_cast<uint8_t *>(zp_arr.data());
+    auto * bias_f16 = use_bias ? zp_arr.data<ov::element_type_traits<ov::element::f16>::value_type>() : nullptr;
+
+    ov::parallel_for(n_super_block, [&](size_t i) {
+        uint8_t * block_data = data + i * bytes_per_block;
+
+        // Extract scale factors and offsets
+        float scale_scales = static_cast<float>(ov::float16::from_bits(*((uint16_t *) block_data)));
+        float scale_mins = static_cast<float>(ov::float16::from_bits(*((uint16_t *) block_data + 1)));
+
+        // Extract qs1 and qs2
+        uint8_t * qs1 = block_data + 4;
+
+        // Calculate scales
+        float scale_vals[8];
+        scale_vals[0] = scale_scales * static_cast<float>((*(qs1) & 0b111111));
+        scale_vals[1] = scale_scales * static_cast<float>((*(qs1 + 1) & 0b111111));
+        scale_vals[2] = scale_scales * static_cast<float>((*(qs1 + 2) & 0b111111));
+        scale_vals[3] = scale_scales * static_cast<float>((*(qs1 + 3) & 0b111111));
+        scale_vals[4] = scale_scales * static_cast<float>((*(qs1 + 8) & 0b00001111) | ((*(qs1) >> 6) << 4));
+        scale_vals[5] = scale_scales * static_cast<float>((*(qs1 + 9) & 0b00001111) | ((*(qs1 + 1) >> 6) << 4));
+        scale_vals[6] = scale_scales * static_cast<float>((*(qs1 + 10) & 0b00001111) | ((*(qs1 + 2) >> 6) << 4));
+        scale_vals[7] = scale_scales * static_cast<float>((*(qs1 + 11) & 0b00001111) | ((*(qs1 + 3) >> 6) << 4));
+
+        // Calculate min values (bias = -min)
+        float min_vals[8];
+        min_vals[0] = scale_mins * static_cast<float>((*(qs1 + 4) & 0b111111));
+        min_vals[1] = scale_mins * static_cast<float>((*(qs1 + 5) & 0b111111));
+        min_vals[2] = scale_mins * static_cast<float>((*(qs1 + 6) & 0b111111));
+        min_vals[3] = scale_mins * static_cast<float>((*(qs1 + 7) & 0b111111));
+        min_vals[4] = scale_mins * static_cast<float>((*(qs1 + 8) >> 4) | ((*(qs1 + 4) >> 6) << 4));
+        min_vals[5] = scale_mins * static_cast<float>((*(qs1 + 9) >> 4) | ((*(qs1 + 5) >> 6) << 4));
+        min_vals[6] = scale_mins * static_cast<float>((*(qs1 + 10) >> 4) | ((*(qs1 + 6) >> 6) << 4));
+        min_vals[7] = scale_mins * static_cast<float>((*(qs1 + 11) >> 4) | ((*(qs1 + 7) >> 6) << 4));
+
+        // Store scales and compute zero points or bias
+        for (int j = 0; j < 8; j++) {
+            scales[i * 8 + j] = ov::float16(scale_vals[j]);
+            if (use_bias) {
+                // Store bias = -min directly as f16, dequant: w*s + bias
+                bias_f16[i * 8 + j] = ov::float16(-min_vals[j]);
+            } else {
+                // zp = min / scale (since bias = -min and zp = -bias/scale)
+                uint8_t zp_val = (scale_vals[j] != 0.0f) ? (uint8_t) std::round(min_vals[j] / scale_vals[j]) : 0;
+                // Pack two 4-bit zero points per byte
+                size_t idx = i * 8 + j;
+                if (idx % 2 == 0) {
+                    zp_u4[idx / 2] = zp_val & 0x0F;
+                } else {
+                    zp_u4[idx / 2] |= (zp_val << 4);
+                }
+            }
+        }
+        unpack_256_4(block_data + 16, weights + i * 128);
+    });
+}
+
+void extract_q6_k_data(const ggml_tensor * tensor,
+                       ov::Tensor & weights_arr,
+                       ov::Tensor & scales_arr,
+                       ov::Tensor & zp_arr) {
+    const uint64_t bytes_per_block = 128 + 64 + 16 + 2;
+    const uint64_t n_super_block = tensor->nb[3] / bytes_per_block;
+
+    auto * data = static_cast<uint8_t *>(tensor->data);
+    auto * weights = static_cast<uint8_t *>(weights_arr.data());
+    auto * scales = scales_arr.data<ov::element_type_traits<ov::element::f16>::value_type>();
+    auto * zp = static_cast<uint8_t *>(zp_arr.data());
+
+    bool is_scalar_zp = (zp_arr.get_size() == 1); // Symmetric quantization
+
+    // For Q6_K, zero point is always 32
+    if (is_scalar_zp) {
+        zp[0] = 32;
+    }
+
+    ov::parallel_for(n_super_block, [&](size_t i) {
+        uint8_t * block_data = data + i * bytes_per_block;
+
+        float scale_factor =
+            static_cast<float>(ov::float16::from_bits(*((uint16_t *) block_data + 104))); // (128+64+16)/2
+
+        for (size_t j = 0; j < 16; j++) {
+            scales[j + i * 16] =
+                ov::float16(scale_factor * static_cast<float>(*((int8_t *) (block_data + 128 + 64 + j))));
+            // For asymmetric quantization, store per-block zero points
+            if (!is_scalar_zp) {
+                zp[j + i * 16] = 32;
+            }
+        }
+
+        uint8_t * ql = block_data;
+        uint8_t * qh = block_data + 128;
+
+        for (int64_t j = 0; j < 32; ++j) {
+            weights[i * 256 + j] = (ql[j] & 0xF) | (((qh[j] >> 0) & 3) << 4);
+            weights[i * 256 + j + 32] = (ql[32 + j] & 0xF) | (((qh[j] >> 2) & 3) << 4);
+            weights[i * 256 + j + 64] = (ql[j] >> 4) | (((qh[j] >> 4) & 3) << 4);
+            weights[i * 256 + j + 96] = (ql[32 + j] >> 4) | (((qh[j] >> 6) & 3) << 4);
+            weights[i * 256 + j + 128] = (ql[64 + j] & 0xF) | (((qh[32 + j] >> 0) & 3) << 4);
+            weights[i * 256 + j + 160] = (ql[96 + j] & 0xF) | (((qh[32 + j] >> 2) & 3) << 4);
+            weights[i * 256 + j + 192] = (ql[64 + j] >> 4) | (((qh[32 + j] >> 4) & 3) << 4);
+            weights[i * 256 + j + 224] = (ql[96 + j] >> 4) | (((qh[32 + j] >> 6) & 3) << 4);
+        }
+    });
+}
+
+static inline void get_scale_min_k4(int j, const uint8_t * q, uint8_t * d, uint8_t * m) {
+    if (j < 4) {
+        *d = q[j] & 63;
+        *m = q[j + 4] & 63;
+    } else {
+        *d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
+        *m = (q[j + 4] >> 4) | ((q[j - 0] >> 6) << 4);
+    }
+}
+
+void extract_q5_k_data(const ggml_tensor * tensor,
+                       ov::Tensor & weights_arr,
+                       ov::Tensor & scales_arr,
+                       ov::Tensor & zp_arr,
+                       bool use_bias) {
+    const uint64_t bytes_per_block = 4 + 12 + 32 + 128;
+    const uint64_t n_super_block = tensor->nb[3] / bytes_per_block;
+
+    auto * data = static_cast<uint8_t *>(tensor->data);
+    auto * weights = static_cast<uint8_t *>(weights_arr.data());
+    auto * scales = scales_arr.data<ov::element_type_traits<ov::element::f16>::value_type>();
+
+    // For bias path, zp_arr holds f16 bias values; for zp path, it holds u8 zero points
+    auto * zp_u8 = use_bias ? nullptr : static_cast<uint8_t *>(zp_arr.data());
+    auto * bias_f16 = use_bias ? zp_arr.data<ov::element_type_traits<ov::element::f16>::value_type>() : nullptr;
+
+    ov::parallel_for(n_super_block, [&](size_t i) {
+        uint8_t * block_data = data + i * bytes_per_block;
+
+        const float d = static_cast<float>(ov::float16::from_bits(*((uint16_t *) block_data)));
+        const float min_factor = static_cast<float>(ov::float16::from_bits(*((uint16_t *) block_data + 1)));
+
+        const uint8_t * scales_data = block_data + 4;           // 12 bytes of scales
+        const uint8_t * qh = block_data + 4 + 12;               // 32 bytes of high bits
+        const uint8_t * ql = block_data + 4 + 12 + 32;          // 128 bytes of low bits
+
+        int is = 0;
+        uint8_t u1 = 1;
+        uint8_t u2 = 2;
+
+        // Process 2 blocks in one iteration
+        for (int j = 0; j < 256; j += 64) { // 256 = QK_K, so 4 iterations of 64
+            uint8_t sc;
+            uint8_t m;
+
+            // Get scale and min for first 32 elements
+            get_scale_min_k4(is + 0, scales_data, &sc, &m);
+            const float d1 = d * sc;
+            const float m1 = min_factor * m;
+
+            // Get scale and min for second 32 elements
+            get_scale_min_k4(is + 1, scales_data, &sc, &m);
+            const float d2 = d * sc;
+            const float m2 = min_factor * m;
+
+            scales[i * 8 + is] = ov::float16(d1);
+            scales[i * 8 + is + 1] = ov::float16(d2);
+            if (use_bias) {
+                // Store bias = -min directly as f16, dequant: w*s + bias
+                bias_f16[i * 8 + is] = ov::float16(-m1);
+                bias_f16[i * 8 + is + 1] = ov::float16(-m2);
+            } else {
+                // zp = min / scale (since bias = -min and zp = -bias/scale)
+                zp_u8[i * 8 + is] = (d1 != 0.0f) ? (uint8_t) std::round(m1 / d1) : 0;
+                zp_u8[i * 8 + is + 1] = (d2 != 0.0f) ? (uint8_t) std::round(m2 / d2) : 0;
+            }
+
+            // Extract weights for first 32 elements (matching deq formula exactly)
+            for (int l = 0; l < 32; ++l) {
+                weights[i * 256 + j + l] = (ql[l] & 0xF) + ((qh[l] & u1) ? 16 : 0);
+            }
+
+            // Extract weights for second 32 elements
+            for (int l = 0; l < 32; ++l) {
+                weights[i * 256 + j + l + 32] = (ql[l] >> 4) + ((qh[l] & u2) ? 16 : 0);
+            }
+
+            ql += 32;
+            is += 2;
+            u1 <<= 2;
+            u2 <<= 2;
+        }
+    });
+}
+
+// TODO Reorder for make_intX_weights
+
+ov::Output<ov::Node> make_int8_weights(ov::Tensor & weight,
+                                       ov::Tensor & scales,
+                                       ov::Tensor & zp,
+                                       size_t group_size,
+                                       bool use_bias) {
+    ov::Shape orig_shape = weight.get_shape();
+
+    // Expand dimensions for scales and zp/bias
+    auto scale_shape = scales.get_shape();
+    auto zp_shape = zp.get_shape();
+    bool is_scalar_zp = zp_shape.empty(); // Symmetric quantization
+
+    ov::Shape packed_shape = {orig_shape[0], orig_shape[1] / group_size, group_size};
+
+    if (packed_shape[1] == 1) {
+        // Requantized channel-wise case
+        packed_shape.erase(packed_shape.begin() + 1);
+    } else {
+        scale_shape.push_back(1);
+        scales.set_shape(scale_shape);
+        // For symmetric quantization, zp remains scalar (don't resize)
+        if (!is_scalar_zp) {
+            zp_shape.push_back(1);
+            zp.set_shape(zp_shape);
+        }
+    }
+
+    // Create graph nodes
+    auto weights_node = std::make_shared<ov::op::v0::Constant>(ov::element::u8, packed_shape,
+                                                               static_cast<uint8_t *>(weight.data()), nullptr);
+    weights_node->get_rt_info()["__gguf_tensor_holder"] = weight;
+    auto scales_f16 = std::make_shared<ov::op::v0::Constant>(scales);
+    auto weights_f16 = std::make_shared<ov::op::v0::Convert>(weights_node, ov::element::f16);
+
+    ov::Output<ov::Node> result;
+    if (use_bias && !is_scalar_zp) {
+        // Bias path: w * s + b (zp tensor holds f16 bias values)
+        auto bias_f16 = std::make_shared<ov::op::v0::Constant>(zp);
+        auto w_s = std::make_shared<ov::op::v1::Multiply>(weights_f16, scales_f16, ov::op::AutoBroadcastType::NUMPY);
+        result = std::make_shared<ov::op::v1::Add>(w_s, bias_f16, ov::op::AutoBroadcastType::NUMPY);
+    } else {
+        // Zero point path: (w - zp) * s
+        auto zero_point = std::make_shared<ov::op::v0::Constant>(zp);
+        float zp_value;
+        if (ov::op::util::get_single_value(zero_point, zp_value)) {
+            zero_point = ov::op::v0::Constant::create(zero_point->get_element_type(), {}, {zp_value});
+        }
+        auto zero_point_f16 = std::make_shared<ov::op::v0::Convert>(zero_point, ov::element::f16);
+        auto w_zp =
+            std::make_shared<ov::op::v1::Subtract>(weights_f16, zero_point_f16, ov::op::AutoBroadcastType::NUMPY);
+        result = std::make_shared<ov::op::v1::Multiply>(w_zp, scales_f16, ov::op::AutoBroadcastType::NUMPY);
+    }
+
+    if (packed_shape.size() != 2) {
+        // If not requantized channel-wise case, reshape back to original shape
+        auto final_shape =
+            std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{orig_shape.size()}, orig_shape);
+        result = std::make_shared<ov::op::v1::Reshape>(result, final_shape, false);
+    }
+
+    return std::make_shared<ov::op::v0::Convert>(result, ov::element::f32);
+}
+
+ov::Output<ov::Node> make_int4_weights(ov::Tensor & weight,
+                                       ov::Tensor & scales,
+                                       ov::Tensor & zp,
+                                       size_t group_size,
+                                       bool use_bias) {
+    ov::Shape orig_weight_shape = weight.get_shape();
+
+    // Expand dimensions for scales and zp/bias
+    ov::Shape scale_shape = scales.get_shape();
+    auto zp_shape = zp.get_shape();
+    bool is_scalar_zp = zp_shape.empty(); // Symmetric quantization
+
+    // Create INT4 weight tensor
+    ov::Shape packed_shape = {orig_weight_shape[0], orig_weight_shape[1] / group_size, group_size};
+
+    if (packed_shape[1] == 1) {
+        // Requantized channel-wise case
+        packed_shape.erase(packed_shape.begin() + 1);
+    } else {
+        scale_shape.push_back(1);
+        scales.set_shape(scale_shape);
+        // For symmetric quantization, zp remains scalar (don't resize)
+        if (!is_scalar_zp) {
+            zp_shape.push_back(1);
+            zp.set_shape(zp_shape);
+        }
+    }
+
+    auto weights_node = std::make_shared<ov::op::v0::Constant>(ov::element::u4, packed_shape,
+                                                               static_cast<uint8_t *>(weight.data()), nullptr);
+    weights_node->get_rt_info()["__gguf_tensor_holder"] = weight;
+    auto weights_f16 = std::make_shared<ov::op::v0::Convert>(weights_node, ov::element::f16);
+    auto scales_f16 = std::make_shared<ov::op::v0::Constant>(scales);
+
+    ov::Output<ov::Node> result;
+    if (use_bias && !is_scalar_zp) {
+        // Bias path: w * s + b (zp tensor holds f16 bias values)
+        auto bias_f16 = std::make_shared<ov::op::v0::Constant>(zp);
+        auto w_s = std::make_shared<ov::op::v1::Multiply>(weights_f16, scales_f16, ov::op::AutoBroadcastType::NUMPY);
+        result = std::make_shared<ov::op::v1::Add>(w_s, bias_f16, ov::op::AutoBroadcastType::NUMPY);
+    } else {
+        // Zero point path: (w - zp) * s
+        auto zero_points_node = std::make_shared<ov::op::v0::Constant>(zp);
+        float zp_value;
+        if (ov::op::util::get_single_value(zero_points_node, zp_value)) {
+            zero_points_node = ov::op::v0::Constant::create(zero_points_node->get_element_type(), {}, {zp_value});
+        }
+        auto zero_points_f16 = std::make_shared<ov::op::v0::Convert>(zero_points_node, ov::element::f16);
+        auto w_zp =
+            std::make_shared<ov::op::v1::Subtract>(weights_f16, zero_points_f16, ov::op::AutoBroadcastType::NUMPY);
+        result = std::make_shared<ov::op::v1::Multiply>(w_zp, scales_f16, ov::op::AutoBroadcastType::NUMPY);
+    }
+
+    if (packed_shape.size() != 2) {
+        // If not requantized channel-wise case, reshape back to original shape
+        auto final_shape = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{orig_weight_shape.size()},
+                                                                  orig_weight_shape);
+        result = std::make_shared<ov::op::v1::Reshape>(result, final_shape, false);
+    }
+
+    return std::make_shared<ov::op::v0::Convert>(result, ov::element::f32);
+}
+
+// Extract quantized weights from tensor and create weight subgraph
+std::shared_ptr<ov::Node> extract_quantized_weights(const ggml_tensor * tensor,
+                                                    const void * data,
+                                                    ov::Tensor & weights,
+                                                    ov::Tensor & scales,
+                                                    ov::Tensor & zp,
+                                                    bool use_bias) {
+    // Create a temporary tensor for extraction functions that read from tensor->data
+    ggml_tensor temp_tensor = *tensor;
+    temp_tensor.data = const_cast<void *>(data);
+
+    // Determine block size based on tensor type
+    int64_t weights_per_block;
+    bool is_u4;
+    switch (tensor->type) {
+        case GGML_TYPE_Q4_0:
+        case GGML_TYPE_Q4_1:
+        case GGML_TYPE_Q4_K:
+            is_u4 = true;
+            weights_per_block = 32;
+            break;
+        case GGML_TYPE_Q8_0:
+        case GGML_TYPE_Q5_K:
+            is_u4 = false;
+            weights_per_block = 32;
+            break;
+        case GGML_TYPE_Q6_K:
+            is_u4 = false;
+            weights_per_block = 16;
+            break;
+        default:
+            throw std::runtime_error("Unsupported quantized type for extraction: " +
+                                     std::string(ggml_type_name(tensor->type)));
+    }
+
+    // Extract quantized data
+    switch (tensor->type) {
+        case GGML_TYPE_Q4_0:
+            extract_q4_0_data(&temp_tensor, weights, scales, zp);
+            break;
+        case GGML_TYPE_Q4_1:
+            extract_q4_1_data(&temp_tensor, weights, scales, zp, use_bias);
+            break;
+        case GGML_TYPE_Q4_K:
+            extract_q4_k_data(&temp_tensor, weights, scales, zp, use_bias);
+            break;
+        case GGML_TYPE_Q8_0:
+            extract_q8_0_data(&temp_tensor, weights, scales, zp);
+            break;
+        case GGML_TYPE_Q6_K:
+            extract_q6_k_data(&temp_tensor, weights, scales, zp);
+            break;
+        case GGML_TYPE_Q5_K:
+            extract_q5_k_data(&temp_tensor, weights, scales, zp, use_bias);
+            break;
+        default:
+            throw std::runtime_error("Unsupported quantized type: " + std::string(ggml_type_name(tensor->type)));
+    }
+
+    // Create the OpenVINO weight subgraph
+    ov::Output<ov::Node> weight_node;
+    if (is_u4) {
+        weight_node = make_int4_weights(weights, scales, zp, weights_per_block, use_bias);
+    } else {
+        weight_node = make_int8_weights(weights, scales, zp, weights_per_block, use_bias);
+    }
+
+    auto result = weight_node.get_node_shared_ptr();
+    result->set_friendly_name(tensor->name);
+    return result;
+}
+
+// Requantize weights to target format, writing to provided buffers
+std::shared_ptr<ov::Node> requantize_to_buffers(const ggml_tensor * tensor,
+                                                const void * data,
+                                                ExtraQuantType requant_type,
+                                                int64_t block_size,
+                                                ov::Tensor & weights,
+                                                ov::Tensor & scales,
+                                                ov::Tensor & zp) {
+    int64_t n_elements = ggml_nelements(tensor);
+
+    // First dequantize to F32
+    std::vector<float> weights_f32(n_elements);
+    ggml_get_type_traits(tensor->type)->to_float(data, weights_f32.data(), n_elements);
+
+    // Handle F16 case - just convert and create constant
+    if (requant_type == ExtraQuantType::F16) {
+        ggml_get_type_traits(GGML_TYPE_F16)->from_float_ref(weights_f32.data(), weights.data(), n_elements);
+        auto result = std::make_shared<ov::op::v0::Constant>(weights);
+        result->set_friendly_name(tensor->name);
+        return result;
+    }
+
+    // Requantize to target quantized format
+    bool is_u4 = (requant_type == ExtraQuantType::Q4_0_C || requant_type == ExtraQuantType::Q4_0_128);
+
+    if (is_u4) {
+        quantize_q4_0(weights_f32.data(), weights, scales, zp, n_elements, block_size);
+    } else if (requant_type == ExtraQuantType::Q8_1_C) {
+        quantize_q8_1(weights_f32.data(), weights, scales, zp, n_elements, block_size);
+    } else {
+        quantize_q8_0(weights_f32.data(), weights, scales, zp, n_elements, block_size);
+    }
+
+    // Create the OpenVINO weight subgraph
+    ov::Output<ov::Node> weight_node;
+    if (is_u4) {
+        weight_node = make_int4_weights(weights, scales, zp, block_size);
+    } else {
+        weight_node = make_int8_weights(weights, scales, zp, block_size);
+    }
+
+    auto result = weight_node.get_node_shared_ptr();
+    result->set_friendly_name(tensor->name);
+    return result;
+}
+
+OvWeight process_weight_tensor(const ggml_tensor * tensor, const void * data, void * output_base_ptr, bool use_bias) {
+    GGML_ASSERT(tensor != nullptr);
+    GGML_ASSERT(data != nullptr);
+
+    OvWeight result;
+
+    // Get 2D shape for weights [rows, cols]
+    ov::Shape node_shape = {static_cast<size_t>(tensor->ne[1]), static_cast<size_t>(tensor->ne[0])};
+
+    // Handle F16/F32/BF16 weights
+    if (tensor->type == GGML_TYPE_F32 || tensor->type == GGML_TYPE_F16 || tensor->type == GGML_TYPE_BF16) {
+        ov::element::Type element_type;
+        switch (tensor->type) {
+            case GGML_TYPE_F32:
+                element_type = ov::element::f32;
+                break;
+            case GGML_TYPE_F16:
+                element_type = ov::element::f16;
+                break;
+            case GGML_TYPE_BF16:
+                element_type = ov::element::bf16;
+                break;
+            default:
+                OPENVINO_THROW("Unexpected tensor type in F16/F32/BF16 path");
+        }
+
+        if (output_base_ptr && output_base_ptr != data) {
+            // Using external buffer - copy data and create shared-memory constant
+            size_t tensor_bytes = ggml_nbytes(tensor);
+            memcpy(output_base_ptr, data, tensor_bytes);
+            result.weights = ov::Tensor(element_type, node_shape, output_base_ptr);
+        } else {
+            result.weights = ov::Tensor(element_type, node_shape, data);
+        }
+        result.weight_node = std::make_shared<ov::op::v0::Constant>(result.weights);
+        return result;
+    }
+
+    // Handle quantized weights
+    if (!ggml_is_quantized(tensor->type)) {
+        OPENVINO_THROW("Unsupported weight tensor type: ", ggml_type_name(tensor->type));
+    }
+
+    result.layout = ggml_openvino_get_extracted_layout(tensor, use_bias);
+    const auto & layout = result.layout;
+    if (layout.total_size == 0) {
+        OPENVINO_THROW("Unsupported quantized type: ", ggml_type_name(tensor->type));
+    }
+
+    if (use_bias) {
+        OPENVINO_ASSERT(!layout.is_requant,
+                        "use_bias is only used for test-backend-ops, which should not have requantization");
+        // bias node will be created on the fly and not use backend buffer
+        output_base_ptr = nullptr;
+    }
+
+    // F16 requant path - no separate scales/zp needed in result
+    if (layout.is_requant && layout.requant_type.has_value() && layout.requant_type.value() == ExtraQuantType::F16) {
+        if (output_base_ptr) {
+            result.weights = ov::Tensor(ov::element::f16, node_shape,
+                                        static_cast<uint8_t *>(output_base_ptr) + layout.weights_offset);
+        } else {
+            result.weights = ov::Tensor(ov::element::f16, node_shape);
+        }
+        ov::Tensor dummy_scales, dummy_zp; // Not used for F16
+        result.weight_node =
+            requantize_to_buffers(tensor, data, ExtraQuantType::F16, 0, result.weights, dummy_scales, dummy_zp);
+        return result;
+    }
+
+    // Quantized path (normal extraction or quantized requant)
+    // Create weight/scale/zp tensors - shared between both paths
+    ov::element::Type weight_type = layout.is_u4 ? ov::element::u4 : ov::element::u8;
+    ov::Shape scale_shape = {node_shape[0], node_shape[1] / layout.weights_per_block};
+    ov::Shape zp_shape = layout.is_symmetric ? ov::Shape{} : scale_shape;
+
+    if (output_base_ptr) {
+        uint8_t * buf_base = static_cast<uint8_t *>(output_base_ptr);
+        result.weights = ov::Tensor(weight_type, node_shape, buf_base + layout.weights_offset);
+        result.scales = ov::Tensor(ov::element::f16, scale_shape, buf_base + layout.scales_offset);
+        result.zp = ov::Tensor(weight_type, zp_shape, buf_base + layout.zp_offset);
+    } else {
+        result.weights = ov::Tensor(weight_type, node_shape);
+        result.scales = ov::Tensor(ov::element::f16, scale_shape);
+        if (use_bias && !layout.is_symmetric) {
+            // bias only has effect for asymmetric quant
+            result.zp = ov::Tensor(ov::element::f16, zp_shape);
+        } else {
+            result.zp = ov::Tensor(weight_type, zp_shape);
+        }
+    }
+
+    if (layout.is_requant && layout.requant_type.has_value()) {
+        result.weight_node = requantize_to_buffers(tensor, data, layout.requant_type.value(), layout.weights_per_block,
+                                                   result.weights, result.scales, result.zp);
+    } else {
+        result.weight_node =
+            extract_quantized_weights(tensor, data, result.weights, result.scales, result.zp, use_bias);
+    }
+
+    return result;
+}
+
+void quantize_q4_0(const float * x,
+                   ov::Tensor & weights_arr,
+                   ov::Tensor & scales_arr,
+                   ov::Tensor & zp_arr,
+                   int64_t k,
+                   int64_t qk) {
+    assert(k % qk == 0);
+    const int nb = k / qk;
+
+    auto * weights = static_cast<uint8_t *>(weights_arr.data());
+    auto * scales = scales_arr.data<ov::element_type_traits<ov::element::f16>::value_type>();
+    auto * zp = static_cast<uint8_t *>(zp_arr.data());
+    bool is_scalar_zp = (zp_arr.get_size() == 1); // Symmetric quantization
+
+    // For Q4_0, zero point is always 8
+    if (is_scalar_zp) {
+        zp[0] = 8 | (8 << 4); // Pack two 4-bit values
+    }
+
+    for (int i = 0; i < nb; i++) {
+        float amax = 0.0f; // absolute max
+        float max = 0.0f;
+
+        for (int j = 0; j < qk; j++) {
+            const float v = x[i * qk + j];
+            if (amax < fabsf(v)) {
+                amax = fabsf(v);
+                max = v;
+            }
+        }
+
+        const float d = max / -8;
+
+        if (d == 0) {
+            scales[i] = ov::float16(1.0f);
+            // zp is already set to 8 for symmetric, or set per-block for asymmetric
+            if (!is_scalar_zp) {
+                if (i % 2 == 0) {
+                    zp[i / 2] = 8;
+                } else {
+                    zp[i / 2] |= (8 << 4);
+                }
+            }
+            memset(weights + i * qk / 2, 8 | (8 << 4), qk / 2);
+            continue;
+        }
+
+        const float id = 1.0f / d;
+        scales[i] = ov::float16(d);
+        // For asymmetric quantization, store per-block zero points
+        if (!is_scalar_zp) {
+            if (i % 2 == 0) {
+                zp[i / 2] = 8;
+            } else {
+                zp[i / 2] |= (8 << 4);
+            }
+        }
+
+        for (int j = 0; j < qk / 2; ++j) {
+            const float x0 = x[i * qk + 2 * j] * id;
+            const float x1 = x[i * qk + 2 * j + 1] * id;
+            const uint8_t xi0 = MIN(15, (int8_t) (x0 + 8.5f));
+            const uint8_t xi1 = MIN(15, (int8_t) (x1 + 8.5f));
+            weights[i * qk / 2 + j] = xi0 | (xi1 << 4);
+        }
+    }
+}
+
+void quantize_q8_0(const float * x,
+                   ov::Tensor & weights_arr,
+                   ov::Tensor & scales_arr,
+                   ov::Tensor & zp_arr,
+                   int64_t k,
+                   int64_t qk) {
+    assert(k % qk == 0);
+    const int nb = k / qk;
+
+    auto * weights = static_cast<uint8_t *>(weights_arr.data());
+    auto * scales = scales_arr.data<ov::element_type_traits<ov::element::f16>::value_type>();
+    auto * zp = static_cast<uint8_t *>(zp_arr.data());
+    bool is_scalar_zp = (zp_arr.get_size() == 1); // Symmetric quantization
+
+    // For Q8_0, zero point is always 128
+    if (is_scalar_zp) {
+        zp[0] = 128;
+    }
+
+    for (int i = 0; i < nb; i++) {
+        float amax = 0.0f; // absolute max
+
+        for (int j = 0; j < qk; j++) {
+            const float v = x[i * qk + j];
+            if (amax < fabsf(v)) {
+                amax = fabsf(v);
+            }
+        }
+
+        const float d = amax / 127.0f;
+        const float id = d ? 1.0f / d : 0.0f;
+        scales[i] = ov::float16(d);
+        // For asymmetric quantization, store per-block zero points
+        if (!is_scalar_zp) {
+            zp[i] = 128;
+        }
+
+        for (int j = 0; j < qk; ++j) {
+            const float x0 = x[i * qk + j] * id;
+            const int8_t xi0 = roundf(x0);
+            weights[i * qk + j] = (uint8_t) (xi0 + 128);
+        }
+    }
+}
+
+void quantize_q8_1(const float * x,
+                   ov::Tensor & weights_arr,
+                   ov::Tensor & scales_arr,
+                   ov::Tensor & zp_arr,
+                   int64_t k,
+                   int64_t qk) {
+    assert(k % qk == 0);
+    const int nb = k / qk;
+
+    auto * weights = static_cast<uint8_t *>(weights_arr.data());
+    auto * scales = scales_arr.data<ov::element_type_traits<ov::element::f16>::value_type>();
+    auto * zp = static_cast<uint8_t *>(zp_arr.data());
+    for (int i = 0; i < nb; i++) {
+        float min = std::numeric_limits<float>::max();
+        float max = std::numeric_limits<float>::lowest();
+
+        for (int j = 0; j < qk; j++) {
+            const float v = x[i * qk + j];
+            if (v < min) {
+                min = v;
+            }
+            if (v > max) {
+                max = v;
+            }
+        }
+
+        const float d = (max - min) / ((1 << 8) - 1);
+        const float id = d ? 1.0f / d : 0.0f;
+        scales[i] = ov::float16(d);
+        // zp = -min / scale (Q8_1 is asymmetric)
+        zp[i] = (d != 0.0f) ? (uint8_t) std::round(-min / d) : 0;
+
+        for (int j = 0; j < qk; ++j) {
+            const float x0 = (x[i * qk + j] - min) * id;
+            const uint8_t xi0 = roundf(x0);
+            weights[i * qk + j] = xi0;
+        }
+    }
+}