whispercpp 1.3.5 → 1.3.6
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/LICENSE +1 -1
- data/README.md +99 -2
- data/ext/extconf.rb +1 -0
- data/ext/ruby_whisper.c +20 -4
- data/ext/ruby_whisper.h +30 -2
- data/ext/ruby_whisper_context.c +216 -124
- data/ext/ruby_whisper_context_params.c +163 -0
- data/ext/ruby_whisper_model.c +0 -1
- data/ext/ruby_whisper_params.c +0 -1
- data/ext/ruby_whisper_segment.c +0 -1
- data/ext/ruby_whisper_token.c +29 -9
- data/ext/ruby_whisper_transcribe.cpp +4 -1
- data/ext/ruby_whisper_vad_context.c +48 -1
- data/ext/ruby_whisper_vad_context_detect.cpp +6 -5
- data/ext/ruby_whisper_vad_params.c +0 -1
- data/ext/ruby_whisper_vad_segment.c +0 -1
- data/ext/ruby_whisper_vad_segments.c +0 -1
- data/ext/sources/CMakeLists.txt +1 -1
- data/ext/sources/bindings/javascript/package.json +1 -1
- data/ext/sources/cmake/whisper-config.cmake.in +5 -40
- data/ext/sources/examples/bench/bench.cpp +23 -18
- data/ext/sources/examples/cli/cli.cpp +8 -0
- data/ext/sources/examples/common-ggml.cpp +2 -0
- data/ext/sources/examples/miniaudio.h +4507 -2131
- data/ext/sources/examples/server/server.cpp +18 -4
- data/ext/sources/examples/talk-llama/CMakeLists.txt +3 -2
- data/ext/sources/examples/talk-llama/llama-adapter.cpp +7 -13
- data/ext/sources/examples/talk-llama/llama-adapter.h +4 -3
- data/ext/sources/examples/talk-llama/llama-arch.cpp +335 -17
- data/ext/sources/examples/talk-llama/llama-arch.h +42 -0
- data/ext/sources/examples/talk-llama/llama-batch.cpp +3 -1
- data/ext/sources/examples/talk-llama/llama-chat.cpp +21 -1
- data/ext/sources/examples/talk-llama/llama-chat.h +1 -0
- data/ext/sources/examples/talk-llama/llama-context.cpp +508 -520
- data/ext/sources/examples/talk-llama/llama-context.h +27 -28
- data/ext/sources/examples/talk-llama/llama-cparams.h +5 -0
- data/ext/sources/examples/talk-llama/llama-ext.h +12 -0
- data/ext/sources/examples/talk-llama/llama-grammar.cpp +8 -8
- data/ext/sources/examples/talk-llama/llama-graph.cpp +583 -130
- data/ext/sources/examples/talk-llama/llama-graph.h +131 -10
- data/ext/sources/examples/talk-llama/llama-hparams.cpp +57 -40
- data/ext/sources/examples/talk-llama/llama-hparams.h +79 -10
- data/ext/sources/examples/talk-llama/llama-impl.cpp +4 -4
- data/ext/sources/examples/talk-llama/llama-impl.h +13 -1
- data/ext/sources/examples/talk-llama/llama-kv-cache-iswa.cpp +3 -1
- data/ext/sources/examples/talk-llama/llama-kv-cache.cpp +274 -89
- data/ext/sources/examples/talk-llama/llama-kv-cache.h +2 -3
- data/ext/sources/examples/talk-llama/llama-memory-hybrid-iswa.cpp +275 -0
- data/ext/sources/examples/talk-llama/llama-memory-hybrid-iswa.h +140 -0
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.cpp +11 -13
- data/ext/sources/examples/talk-llama/llama-mmap.cpp +28 -11
- data/ext/sources/examples/talk-llama/llama-model-loader.cpp +527 -119
- data/ext/sources/examples/talk-llama/llama-model-loader.h +35 -5
- data/ext/sources/examples/talk-llama/llama-model-saver.cpp +60 -46
- data/ext/sources/examples/talk-llama/llama-model-saver.h +5 -2
- data/ext/sources/examples/talk-llama/llama-model.cpp +1365 -647
- data/ext/sources/examples/talk-llama/llama-model.h +72 -19
- data/ext/sources/examples/talk-llama/llama-quant.cpp +578 -346
- data/ext/sources/examples/talk-llama/{llama-sampling.cpp → llama-sampler.cpp} +190 -76
- data/ext/sources/examples/talk-llama/{llama-sampling.h → llama-sampler.h} +0 -2
- data/ext/sources/examples/talk-llama/llama-vocab.cpp +118 -48
- data/ext/sources/examples/talk-llama/llama-vocab.h +5 -0
- data/ext/sources/examples/talk-llama/llama.cpp +76 -22
- data/ext/sources/examples/talk-llama/llama.h +63 -30
- data/ext/sources/examples/talk-llama/models/afmoe.cpp +2 -3
- data/ext/sources/examples/talk-llama/models/apertus.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/arcee.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/arctic.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/baichuan.cpp +4 -3
- data/ext/sources/examples/talk-llama/models/bailingmoe.cpp +1 -2
- data/ext/sources/examples/talk-llama/models/bailingmoe2.cpp +3 -5
- data/ext/sources/examples/talk-llama/models/bert.cpp +13 -7
- data/ext/sources/examples/talk-llama/models/bitnet.cpp +9 -24
- data/ext/sources/examples/talk-llama/models/bloom.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/chameleon.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/chatglm.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/codeshell.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/cogvlm.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/cohere2-iswa.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/command-r.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/dbrx.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/deci.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/deepseek.cpp +4 -6
- data/ext/sources/examples/talk-llama/models/deepseek2.cpp +24 -21
- data/ext/sources/examples/talk-llama/models/delta-net-base.cpp +445 -0
- data/ext/sources/examples/talk-llama/models/dots1.cpp +4 -6
- data/ext/sources/examples/talk-llama/models/dream.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/ernie4-5-moe.cpp +4 -6
- data/ext/sources/examples/talk-llama/models/ernie4-5.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/eurobert.cpp +97 -0
- data/ext/sources/examples/talk-llama/models/exaone-moe.cpp +145 -0
- data/ext/sources/examples/talk-llama/models/exaone.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/exaone4.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/falcon-h1.cpp +2 -4
- data/ext/sources/examples/talk-llama/models/falcon.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/gemma-embedding.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma2-iswa.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma3.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma3n-iswa.cpp +7 -7
- data/ext/sources/examples/talk-llama/models/glm4-moe.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/glm4.cpp +14 -7
- data/ext/sources/examples/talk-llama/models/gpt2.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/gptneox.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/granite-hybrid.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/granite.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/grok.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/grovemoe.cpp +5 -7
- data/ext/sources/examples/talk-llama/models/hunyuan-dense.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/hunyuan-moe.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/internlm2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/jais.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/jais2.cpp +123 -0
- data/ext/sources/examples/talk-llama/models/jamba.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/kimi-linear.cpp +381 -0
- data/ext/sources/examples/talk-llama/models/lfm2.cpp +145 -124
- data/ext/sources/examples/talk-llama/models/llada-moe.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/llada.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/llama-iswa.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/llama.cpp +18 -11
- data/ext/sources/examples/talk-llama/models/maincoder.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/{graph-context-mamba.cpp → mamba-base.cpp} +9 -3
- data/ext/sources/examples/talk-llama/models/mamba.cpp +1 -2
- data/ext/sources/examples/talk-llama/models/mimo2-iswa.cpp +11 -5
- data/ext/sources/examples/talk-llama/models/minicpm3.cpp +14 -13
- data/ext/sources/examples/talk-llama/models/minimax-m2.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/mistral3.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/models.h +181 -46
- data/ext/sources/examples/talk-llama/models/modern-bert.cpp +2 -9
- data/ext/sources/examples/talk-llama/models/mpt.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/nemotron-h.cpp +26 -14
- data/ext/sources/examples/talk-llama/models/nemotron.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/neo-bert.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/olmo.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/olmo2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/olmoe.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/openai-moe-iswa.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/openelm.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/orion.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/paddleocr.cpp +122 -0
- data/ext/sources/examples/talk-llama/models/pangu-embedded.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/phi2.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/phi3.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/plamo.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/plamo2.cpp +9 -5
- data/ext/sources/examples/talk-llama/models/plamo3.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/plm.cpp +15 -14
- data/ext/sources/examples/talk-llama/models/qwen.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/qwen2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/qwen2moe.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/qwen2vl.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/qwen3.cpp +12 -9
- data/ext/sources/examples/talk-llama/models/qwen35.cpp +381 -0
- data/ext/sources/examples/talk-llama/models/qwen35moe.cpp +422 -0
- data/ext/sources/examples/talk-llama/models/qwen3moe.cpp +15 -8
- data/ext/sources/examples/talk-llama/models/qwen3next.cpp +84 -432
- data/ext/sources/examples/talk-llama/models/qwen3vl-moe.cpp +9 -18
- data/ext/sources/examples/talk-llama/models/qwen3vl.cpp +8 -17
- data/ext/sources/examples/talk-llama/models/refact.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/rnd1.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/rwkv6-base.cpp +2 -0
- data/ext/sources/examples/talk-llama/models/rwkv7-base.cpp +2 -0
- data/ext/sources/examples/talk-llama/models/seed-oss.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/smallthinker.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/smollm3.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/stablelm.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/starcoder.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/starcoder2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/step35-iswa.cpp +165 -0
- data/ext/sources/examples/talk-llama/models/t5-dec.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/t5-enc.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/xverse.cpp +3 -3
- data/ext/sources/examples/talk-llama/unicode.cpp +21 -65
- data/ext/sources/ggml/CMakeLists.txt +9 -3
- data/ext/sources/ggml/include/ggml-backend.h +1 -1
- data/ext/sources/ggml/include/ggml-cann.h +1 -1
- data/ext/sources/ggml/include/ggml-cpu.h +5 -0
- data/ext/sources/ggml/include/ggml-openvino.h +37 -0
- data/ext/sources/ggml/include/ggml-opt.h +1 -1
- data/ext/sources/ggml/include/ggml-rpc.h +6 -1
- data/ext/sources/ggml/include/ggml-virtgpu.h +14 -0
- data/ext/sources/ggml/include/ggml.h +56 -9
- data/ext/sources/ggml/src/CMakeLists.txt +3 -0
- data/ext/sources/ggml/src/ggml-alloc.c +4 -9
- data/ext/sources/ggml/src/ggml-backend-dl.cpp +48 -0
- data/ext/sources/ggml/src/ggml-backend-dl.h +45 -0
- data/ext/sources/ggml/src/ggml-backend-reg.cpp +28 -86
- data/ext/sources/ggml/src/ggml-backend.cpp +5 -2
- data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +1 -1
- data/ext/sources/ggml/src/ggml-blas/ggml-blas.cpp +6 -2
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.cpp +1 -1
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.h +1 -1
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.cpp +348 -189
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +40 -85
- data/ext/sources/ggml/src/ggml-cann/common.h +3 -4
- data/ext/sources/ggml/src/ggml-cann/ggml-cann.cpp +44 -62
- data/ext/sources/ggml/src/ggml-common.h +11 -0
- data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +16 -11
- data/ext/sources/ggml/src/ggml-cpu/amx/amx.cpp +42 -19
- data/ext/sources/ggml/src/ggml-cpu/amx/common.h +34 -10
- data/ext/sources/ggml/src/ggml-cpu/amx/mmq.cpp +85 -85
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/quants.c +85 -1
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/repack.cpp +2744 -548
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/quants.c +1653 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/repack.cpp +1391 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/s390/quants.c +8 -10
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/quants.c +9 -9
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/repack.cpp +118 -18
- data/ext/sources/ggml/src/ggml-cpu/arch-fallback.h +107 -26
- data/ext/sources/ggml/src/ggml-cpu/binary-ops.cpp +2 -6
- data/ext/sources/ggml/src/ggml-cpu/common.h +8 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-impl.h +3 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +59 -12
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.cpp +15 -0
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +21 -20
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +965 -252
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +584 -197
- data/ext/sources/ggml/src/ggml-cpu/ops.cpp +903 -188
- data/ext/sources/ggml/src/ggml-cpu/ops.h +1 -0
- data/ext/sources/ggml/src/ggml-cpu/quants.c +40 -0
- data/ext/sources/ggml/src/ggml-cpu/quants.h +3 -0
- data/ext/sources/ggml/src/ggml-cpu/repack.cpp +2890 -679
- data/ext/sources/ggml/src/ggml-cpu/repack.h +119 -8
- data/ext/sources/ggml/src/ggml-cpu/simd-gemm.h +136 -0
- data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +111 -3
- data/ext/sources/ggml/src/ggml-cpu/unary-ops.cpp +1 -1
- data/ext/sources/ggml/src/ggml-cpu/vec.cpp +17 -0
- data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +1 -1
- data/ext/sources/ggml/src/ggml-cuda/argsort.cu +19 -10
- data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +32 -30
- data/ext/sources/ggml/src/ggml-cuda/common.cuh +134 -18
- data/ext/sources/ggml/src/ggml-cuda/convert.cu +41 -27
- data/ext/sources/ggml/src/ggml-cuda/cpy.cu +6 -3
- data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +78 -64
- data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +384 -143
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cuh +36 -22
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec.cuh +3 -3
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +26 -5
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +1 -1
- data/ext/sources/ggml/src/ggml-cuda/fattn.cu +127 -12
- data/ext/sources/ggml/src/ggml-cuda/gated_delta_net.cu +263 -0
- data/ext/sources/ggml/src/ggml-cuda/gated_delta_net.cuh +4 -0
- data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +595 -200
- data/ext/sources/ggml/src/ggml-cuda/mean.cu +9 -8
- data/ext/sources/ggml/src/ggml-cuda/mma.cuh +173 -6
- data/ext/sources/ggml/src/ggml-cuda/mmf.cu +30 -10
- data/ext/sources/ggml/src/ggml-cuda/mmf.cuh +158 -85
- data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +34 -22
- data/ext/sources/ggml/src/ggml-cuda/mmvf.cu +127 -67
- data/ext/sources/ggml/src/ggml-cuda/mmvf.cuh +2 -0
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +157 -65
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cuh +1 -0
- data/ext/sources/ggml/src/ggml-cuda/norm.cu +18 -76
- data/ext/sources/ggml/src/ggml-cuda/pad.cu +13 -10
- data/ext/sources/ggml/src/ggml-cuda/quantize.cu +1 -1
- data/ext/sources/ggml/src/ggml-cuda/reduce_rows.cuh +2 -16
- data/ext/sources/ggml/src/ggml-cuda/rope.cu +233 -133
- data/ext/sources/ggml/src/ggml-cuda/softmax.cu +8 -83
- data/ext/sources/ggml/src/ggml-cuda/solve_tri.cu +1 -1
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +56 -32
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cuh +1 -1
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_32.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_32.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +3 -3
- data/ext/sources/ggml/src/ggml-cuda/top-k.cu +0 -1
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cu +199 -135
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cuh +20 -14
- data/ext/sources/ggml/src/ggml-cuda/unary.cu +55 -0
- data/ext/sources/ggml/src/ggml-cuda/unary.cuh +2 -0
- data/ext/sources/ggml/src/ggml-cuda/vecdotq.cuh +31 -17
- data/ext/sources/ggml/src/ggml-cuda/vendors/hip.h +10 -0
- data/ext/sources/ggml/src/ggml-hexagon/CMakeLists.txt +82 -45
- data/ext/sources/ggml/src/ggml-hexagon/ggml-hexagon.cpp +334 -160
- data/ext/sources/ggml/src/ggml-hexagon/htp/CMakeLists.txt +7 -5
- data/ext/sources/ggml/src/ggml-hexagon/htp/act-ops.c +328 -197
- data/ext/sources/ggml/src/ggml-hexagon/htp/argsort-ops.c +281 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/binary-ops.c +765 -234
- data/ext/sources/ggml/src/ggml-hexagon/htp/cpy-ops.c +252 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/flash-attn-ops.c +412 -265
- data/ext/sources/ggml/src/ggml-hexagon/htp/get-rows-ops.c +23 -23
- data/ext/sources/ggml/src/ggml-hexagon/htp/{htp-dma.c → hex-dma.c} +1 -1
- data/ext/sources/ggml/src/ggml-hexagon/htp/{htp-dma.h → hex-dma.h} +28 -3
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-dump.h +77 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-fastdiv.h +37 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-utils.h +51 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-ctx.h +1 -1
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-msg.h +27 -37
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-ops.h +6 -35
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-arith.h +443 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-base.h +240 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-copy.h +245 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-div.h +251 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-dump.h +129 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-exp.h +215 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-floor.h +100 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-inverse.h +210 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-reduce.h +296 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-scale.h +133 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sigmoid.h +141 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sqrt.h +126 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-types.h +36 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-utils.h +20 -1347
- data/ext/sources/ggml/src/ggml-hexagon/htp/main.c +211 -13
- data/ext/sources/ggml/src/ggml-hexagon/htp/matmul-ops.c +1119 -952
- data/ext/sources/ggml/src/ggml-hexagon/htp/rope-ops.c +254 -244
- data/ext/sources/ggml/src/ggml-hexagon/htp/set-rows-ops.c +36 -36
- data/ext/sources/ggml/src/ggml-hexagon/htp/softmax-ops.c +155 -138
- data/ext/sources/ggml/src/ggml-hexagon/htp/ssm-conv.c +339 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/sum-rows-ops.c +128 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/unary-ops.c +209 -114
- data/ext/sources/ggml/src/ggml-hexagon/htp/worker-pool.c +1 -5
- data/ext/sources/ggml/src/ggml-hexagon/htp-drv.cpp +418 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp-drv.h +121 -0
- data/ext/sources/ggml/src/ggml-hexagon/libdl.h +79 -0
- data/ext/sources/ggml/src/ggml-hexagon/libggml-htp.inf +38 -0
- data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +6 -0
- data/ext/sources/ggml/src/ggml-impl.h +62 -0
- data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +10 -10
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-common.cpp +13 -2
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.h +8 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.m +147 -17
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.cpp +274 -73
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.h +22 -4
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.m +102 -36
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +174 -23
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.cpp +580 -280
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.h +5 -4
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.cpp +320 -107
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.metal +1068 -825
- data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +19 -1
- data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +3108 -636
- data/ext/sources/ggml/src/ggml-opencl/kernels/concat.cl +41 -99
- data/ext/sources/ggml/src/ggml-opencl/kernels/cpy.cl +45 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cumsum.cl +139 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +204 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/diag.cl +27 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/exp.cl +125 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/expm1.cl +87 -56
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemm_noshuffle_q4_1_f32.cl +132 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_general_q8_0_f32.cl +195 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_q4_1_f32.cl +283 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/l2_norm.cl +71 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mean.cl +114 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q4_0_f32_l4_lm.cl +163 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q4_1_f32_l4_lm.cl +165 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q6_k_f32_l4_lm.cl +158 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q8_0_f32_8x4.cl +129 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_1_f32.cl +219 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_1_f32_flat.cl +229 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_k_f32.cl +180 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/{mul_mv_q6_k.cl → mul_mv_q6_k_f32.cl} +4 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q6_k_f32_flat.cl +194 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/neg.cl +125 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/repeat.cl +31 -32
- data/ext/sources/ggml/src/ggml-opencl/kernels/scale.cl +14 -4
- data/ext/sources/ggml/src/ggml-opencl/kernels/softplus.cl +88 -60
- data/ext/sources/ggml/src/ggml-opencl/kernels/solve_tri.cl +51 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sum_rows.cl +114 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/tanh.cl +94 -48
- data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +26 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/tri.cl +32 -0
- data/ext/sources/ggml/src/ggml-openvino/.clang-format +154 -0
- data/ext/sources/ggml/src/ggml-openvino/CMakeLists.txt +22 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-decoder.cpp +975 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-decoder.h +294 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino-extra.cpp +373 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino-extra.h +182 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino.cpp +1110 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-quants.cpp +884 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-quants.h +153 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/decoder.h +74 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/frontend.cpp +27 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/frontend.h +23 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/input_model.cpp +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/input_model.h +29 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/node_context.h +112 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/cont.cpp +48 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/cpy.cpp +21 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/flash_attn_ext.cpp +90 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/get_rows.cpp +69 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/glu_geglu.cpp +61 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/glu_swiglu.cpp +62 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/mulmat.cpp +90 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/permute.cpp +102 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/reshape.cpp +83 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/rms_norm.cpp +46 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/rope.cpp +123 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/scale.cpp +41 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/set_rows.cpp +76 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/softmax.cpp +89 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/transpose.cpp +23 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/unary_silu.cpp +27 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/view.cpp +53 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op_table.cpp +46 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op_table.h +39 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/eliminate_zp.cpp +123 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/eliminate_zp.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/fuse_to_sdpa.cpp +60 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/fuse_to_sdpa.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/mark_decompression_convert_constant_folding.h +29 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/squeeze_matmul.cpp +58 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/squeeze_matmul.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/translate_session.cpp +293 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/translate_session.h +28 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/utils.cpp +226 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/utils.h +85 -0
- data/ext/sources/ggml/src/ggml-openvino/utils.cpp +823 -0
- data/ext/sources/ggml/src/ggml-openvino/utils.h +123 -0
- data/ext/sources/ggml/src/ggml-quants.c +96 -5
- data/ext/sources/ggml/src/ggml-quants.h +3 -0
- data/ext/sources/ggml/src/ggml-sycl/CMakeLists.txt +15 -88
- data/ext/sources/ggml/src/ggml-sycl/add-id.cpp +5 -1
- data/ext/sources/ggml/src/ggml-sycl/backend.hpp +1 -0
- data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +21 -20
- data/ext/sources/ggml/src/ggml-sycl/common.hpp +315 -10
- data/ext/sources/ggml/src/ggml-sycl/convert.cpp +69 -1
- data/ext/sources/ggml/src/ggml-sycl/convert.hpp +22 -1
- data/ext/sources/ggml/src/ggml-sycl/count-equal.cpp +1 -1
- data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +791 -47
- data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +78 -68
- data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +2 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-common.hpp +1179 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-tile.cpp +55 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-tile.hpp +1338 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-vec.hpp +667 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn.cpp +225 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn.hpp +22 -0
- data/ext/sources/ggml/src/ggml-sycl/gated_delta_net.cpp +309 -0
- data/ext/sources/ggml/src/ggml-sycl/gated_delta_net.hpp +8 -0
- data/ext/sources/ggml/src/ggml-sycl/ggml-sycl.cpp +316 -51
- data/ext/sources/ggml/src/ggml-sycl/norm.cpp +65 -66
- data/ext/sources/ggml/src/ggml-sycl/outprod.cpp +3 -3
- data/ext/sources/ggml/src/ggml-sycl/presets.hpp +3 -0
- data/ext/sources/ggml/src/ggml-sycl/quants.hpp +1 -1
- data/ext/sources/ggml/src/ggml-sycl/rope.cpp +450 -287
- data/ext/sources/ggml/src/ggml-sycl/rope.hpp +6 -0
- data/ext/sources/ggml/src/ggml-sycl/softmax.cpp +6 -6
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq112-dv112.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq128-dv128.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq256-dv256.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq40-dv40.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq576-dv512.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq64-dv64.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq72-dv72.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq80-dv80.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq96-dv96.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +13 -0
- data/ext/sources/ggml/src/ggml-sycl/wkv.cpp +1 -1
- data/ext/sources/ggml/src/ggml-virtgpu/CMakeLists.txt +70 -0
- data/ext/sources/ggml/src/ggml-virtgpu/apir_cs_ggml-rpc-front.cpp +87 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/CMakeLists.txt +21 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/apir_cs_ggml-rpc-back.cpp +115 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-convert.h +13 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-backend.cpp +102 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-buffer-type.cpp +105 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-buffer.cpp +179 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-device.cpp +148 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.cpp +51 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.gen.h +73 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.h +27 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-virgl-apir.h +32 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend.cpp +144 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/api_remoting.h +95 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_backend.gen.h +94 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_backend.h +50 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs.h +378 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs_ggml.h +232 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs_rpc.h +58 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-buffer-type.cpp +81 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-buffer.cpp +119 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-device.cpp +158 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-reg.cpp +213 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend.cpp +69 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-remoting.h +71 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggmlremoting_functions.yaml +166 -0
- data/ext/sources/ggml/src/ggml-virtgpu/include/apir_hw.h +9 -0
- data/ext/sources/ggml/src/ggml-virtgpu/regenerate_remoting.py +333 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-apir.h +15 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-backend.cpp +58 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-buffer-type.cpp +110 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-buffer.cpp +173 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-device.cpp +192 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-impl.h +36 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward.gen.h +53 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-shm.cpp +98 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-shm.h +23 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-utils.cpp +179 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-utils.h +86 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu.cpp +544 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu.h +117 -0
- data/ext/sources/ggml/src/ggml-vulkan/CMakeLists.txt +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/ggml-vulkan.cpp +1250 -465
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +16 -8
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/elu.comp +27 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +374 -170
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.glsl +66 -22
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +389 -201
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +106 -58
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_mask_opt.comp +162 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +9 -8
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gated_delta_net.comp +128 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +12 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.glsl +20 -17
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +11 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +8 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_id_funcs.glsl +3 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +5 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.glsl +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +2 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_funcs.glsl +36 -63
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_params.glsl +10 -5
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_vision.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sgn.comp +21 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/ssm_conv.comp +16 -10
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +55 -35
- data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu-shader-lib.hpp +1314 -109
- data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu.cpp +1660 -1371
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argmax.wgsl +72 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argsort.wgsl +106 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argsort_merge.wgsl +134 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/binary.wgsl +141 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/common_decls.tmpl +65 -72
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/concat.wgsl +75 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cpy.tmpl.wgsl +6 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cumsum.wgsl +66 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/embed_wgsl.py +40 -5
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/flash_attn.wgsl +105 -60
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{get_rows.tmpl.wgsl → get_rows.wgsl} +53 -259
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{mul_mat.tmpl.wgsl → mul_mat.wgsl} +68 -257
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_decls.tmpl +692 -23
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{mul_mat_reg_tile.tmpl.wgsl → mul_mat_reg_tile.wgsl} +28 -128
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{mul_mat_subgroup_matrix.tmpl.wgsl → mul_mat_subgroup_matrix.wgsl} +31 -137
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_vec.wgsl +480 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/pad.wgsl +86 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/repeat.wgsl +67 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{scale.tmpl.wgsl → scale.wgsl} +9 -36
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/set_rows.wgsl +40 -12
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/sum_rows.wgsl +55 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/unary.wgsl +193 -0
- data/ext/sources/ggml/src/ggml-zdnn/ggml-zdnn.cpp +6 -1
- data/ext/sources/ggml/src/ggml-zendnn/CMakeLists.txt +31 -32
- data/ext/sources/ggml/src/ggml-zendnn/ggml-zendnn.cpp +9 -6
- data/ext/sources/ggml/src/ggml.c +167 -33
- data/ext/sources/ggml/src/gguf.cpp +229 -44
- data/ext/sources/src/whisper.cpp +6 -28
- data/sig/whisper.rbs +43 -2
- data/test/test_context_params.rb +82 -0
- data/test/test_token.rb +11 -0
- data/test/test_vad_context.rb +58 -8
- data/test/test_whisper.rb +20 -0
- data/whispercpp.gemspec +1 -1
- metadata +240 -28
- data/ext/sources/ggml/cmake/BuildTypes.cmake +0 -54
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm-ppc.h +0 -333
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-exp.c +0 -94
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-inverse.c +0 -72
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sigmoid.c +0 -49
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-utils.c +0 -1020
- data/ext/sources/ggml/src/ggml-hexagon/htp/ops-utils.h +0 -149
- data/ext/sources/ggml/src/ggml-hexagon/htp-utils.c +0 -454
- data/ext/sources/ggml/src/ggml-hexagon/htp-utils.h +0 -221
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/bin_op.tmpl.wgsl +0 -188
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/binary_head.tmpl +0 -45
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_vec.tmpl.wgsl +0 -267
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/set_rows.tmpl.wgsl +0 -112
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/unary_op.wgsl +0 -483
|
@@ -1,10 +1,9 @@
|
|
|
1
|
-
#include "ggml.h"
|
|
2
1
|
#include "models.h"
|
|
3
2
|
|
|
4
|
-
#
|
|
3
|
+
#include "llama-memory-recurrent.h"
|
|
5
4
|
|
|
6
5
|
llm_build_qwen3next::llm_build_qwen3next(const llama_model & model, const llm_graph_params & params) :
|
|
7
|
-
|
|
6
|
+
llm_build_delta_net_base(params), model(model) {
|
|
8
7
|
ggml_tensor * cur;
|
|
9
8
|
ggml_tensor * inpL;
|
|
10
9
|
|
|
@@ -16,27 +15,18 @@ llm_build_qwen3next::llm_build_qwen3next(const llama_model & model, const llm_gr
|
|
|
16
15
|
ggml_tensor * inp_pos = build_inp_pos();
|
|
17
16
|
ggml_tensor * inp_out_ids = build_inp_out_ids();
|
|
18
17
|
|
|
19
|
-
ggml_tensor * causal_mask =
|
|
20
|
-
ggml_tri(ctx0, ggml_fill_inplace(ctx0, ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, CHUNK_SIZE, CHUNK_SIZE), 1.0f),
|
|
21
|
-
GGML_TRI_TYPE_LOWER);
|
|
22
|
-
|
|
23
|
-
ggml_tensor * identity = ggml_diag(ctx0, ggml_fill_inplace(ctx0, ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, CHUNK_SIZE), 1.0f));
|
|
24
|
-
ggml_tensor * diag_mask = ggml_add(ctx0, causal_mask, identity);
|
|
25
|
-
|
|
26
|
-
ggml_build_forward_expand(gf, causal_mask);
|
|
27
|
-
ggml_build_forward_expand(gf, identity);
|
|
28
|
-
ggml_build_forward_expand(gf, diag_mask);
|
|
29
|
-
|
|
30
18
|
for (int il = 0; il < n_layer; ++il) {
|
|
31
19
|
ggml_tensor * inpSA = inpL;
|
|
32
20
|
|
|
33
21
|
cur = build_norm(inpL, model.layers[il].attn_norm, nullptr, LLM_NORM_RMS, il);
|
|
34
22
|
cb(cur, "attn_norm", il);
|
|
35
23
|
|
|
24
|
+
ggml_build_forward_expand(gf, cur);
|
|
25
|
+
|
|
36
26
|
// Determine layer type and build appropriate attention mechanism
|
|
37
27
|
if (hparams.is_recurrent(il)) {
|
|
38
28
|
// Linear attention layer (gated delta net)
|
|
39
|
-
cur = build_layer_attn_linear(inp->get_recr(), cur,
|
|
29
|
+
cur = build_layer_attn_linear(inp->get_recr(), cur, il);
|
|
40
30
|
} else {
|
|
41
31
|
// Full attention layer
|
|
42
32
|
cur = build_layer_attn(inp->get_attn(), cur, inp_pos, il);
|
|
@@ -94,348 +84,6 @@ static ggml_tensor * get_slice_2d(ggml_context * ctx0, ggml_tensor * t, int64_t
|
|
|
94
84
|
t->nb[1], t->nb[2], t->nb[3], t->nb[2] * c);
|
|
95
85
|
}
|
|
96
86
|
|
|
97
|
-
std::pair<ggml_tensor *, ggml_tensor *> llm_build_qwen3next::build_delta_net_chunking(
|
|
98
|
-
ggml_tensor * q,
|
|
99
|
-
ggml_tensor * k,
|
|
100
|
-
ggml_tensor * v,
|
|
101
|
-
ggml_tensor * g,
|
|
102
|
-
ggml_tensor * beta,
|
|
103
|
-
ggml_tensor * state,
|
|
104
|
-
ggml_tensor * causal_mask,
|
|
105
|
-
ggml_tensor * identity,
|
|
106
|
-
ggml_tensor * diag_mask,
|
|
107
|
-
int il) {
|
|
108
|
-
const int64_t S_k = q->ne[0];
|
|
109
|
-
const int64_t H_k = q->ne[1];
|
|
110
|
-
const int64_t n_tokens = q->ne[2];
|
|
111
|
-
const int64_t n_seqs = q->ne[3];
|
|
112
|
-
|
|
113
|
-
const int64_t S_v = v->ne[0];
|
|
114
|
-
const int64_t H_v = v->ne[1];
|
|
115
|
-
|
|
116
|
-
GGML_ASSERT(v->ne[2] == n_tokens);
|
|
117
|
-
GGML_ASSERT(k->ne[2] == n_tokens);
|
|
118
|
-
GGML_ASSERT(g->ne[0] == H_v && g->ne[1] == n_tokens && g->ne[2] == n_seqs);
|
|
119
|
-
GGML_ASSERT(beta->ne[0] == H_v && beta->ne[2] == n_tokens && beta->ne[3] == n_seqs);
|
|
120
|
-
GGML_ASSERT(state->ne[0] == S_v && state->ne[1] == S_v * H_v && state->ne[2] == 1 && state->ne[3] == n_seqs);
|
|
121
|
-
|
|
122
|
-
GGML_ASSERT(q->ne[0] == S_k && q->ne[1] == H_k && q->ne[2] == n_tokens && q->ne[3] == n_seqs);
|
|
123
|
-
GGML_ASSERT(k->ne[0] == S_k && k->ne[1] == H_k && k->ne[2] == n_tokens && k->ne[3] == n_seqs);
|
|
124
|
-
|
|
125
|
-
GGML_ASSERT(H_k == H_v); // we did a repeat to make sure this is the case
|
|
126
|
-
|
|
127
|
-
const float eps_norm = hparams.f_norm_rms_eps;
|
|
128
|
-
|
|
129
|
-
q = ggml_l2_norm(ctx0, q, eps_norm);
|
|
130
|
-
k = ggml_l2_norm(ctx0, k, eps_norm);
|
|
131
|
-
|
|
132
|
-
const float scale = 1.0f / sqrtf(S_v);
|
|
133
|
-
|
|
134
|
-
q = ggml_scale(ctx0, q, scale);
|
|
135
|
-
|
|
136
|
-
beta = ggml_sigmoid(ctx0, beta);
|
|
137
|
-
|
|
138
|
-
cb(q, "q_in", il);
|
|
139
|
-
cb(k, "k_in", il);
|
|
140
|
-
cb(v, "v_in", il);
|
|
141
|
-
cb(beta, "beta_in", il);
|
|
142
|
-
cb(g, "g_in", il);
|
|
143
|
-
|
|
144
|
-
q = ggml_cont_4d(ctx0, ggml_permute(ctx0, q, 0, 2, 1, 3), S_v, n_tokens, H_v, n_seqs);
|
|
145
|
-
k = ggml_cont_4d(ctx0, ggml_permute(ctx0, k, 0, 2, 1, 3), S_v, n_tokens, H_v, n_seqs);
|
|
146
|
-
v = ggml_cont_4d(ctx0, ggml_permute(ctx0, v, 0, 2, 1, 3), S_v, n_tokens, H_v, n_seqs);
|
|
147
|
-
g = ggml_cont_4d(ctx0, ggml_permute(ctx0, g, 2, 0, 3, 1), n_tokens, 1, H_k, n_seqs);
|
|
148
|
-
|
|
149
|
-
beta = ggml_cont(ctx0, ggml_permute(ctx0, beta, 2, 0, 1, 3));
|
|
150
|
-
state = ggml_reshape_4d(ctx0, state, S_v, S_v, H_v, n_seqs);
|
|
151
|
-
|
|
152
|
-
cb(q, "q_perm", il);
|
|
153
|
-
cb(k, "k_perm", il);
|
|
154
|
-
cb(v, "v_perm", il);
|
|
155
|
-
cb(beta, "beta_perm", il);
|
|
156
|
-
cb(g, "g_perm", il);
|
|
157
|
-
cb(state, "state_in", il);
|
|
158
|
-
|
|
159
|
-
GGML_ASSERT(q->ne[1] == n_tokens && q->ne[0] == S_k && q->ne[2] == H_k && q->ne[3] == n_seqs);
|
|
160
|
-
GGML_ASSERT(k->ne[1] == n_tokens && k->ne[0] == S_k && k->ne[2] == H_k && k->ne[3] == n_seqs);
|
|
161
|
-
GGML_ASSERT(v->ne[1] == n_tokens && v->ne[0] == S_v && v->ne[2] == H_k && v->ne[3] == n_seqs);
|
|
162
|
-
GGML_ASSERT(beta->ne[1] == n_tokens && beta->ne[2] == H_k && beta->ne[0] == 1 && beta->ne[3] == n_seqs);
|
|
163
|
-
|
|
164
|
-
// Do padding
|
|
165
|
-
const int64_t chunk_size = CHUNK_SIZE;
|
|
166
|
-
|
|
167
|
-
const int64_t pad = (chunk_size - n_tokens % chunk_size) % chunk_size;
|
|
168
|
-
const int64_t n_chunks = (n_tokens + pad) / chunk_size;
|
|
169
|
-
|
|
170
|
-
q = ggml_pad(ctx0, q, 0, pad, 0, 0);
|
|
171
|
-
k = ggml_pad(ctx0, k, 0, pad, 0, 0);
|
|
172
|
-
v = ggml_pad(ctx0, v, 0, pad, 0, 0);
|
|
173
|
-
g = ggml_pad(ctx0, g, pad, 0, 0, 0);
|
|
174
|
-
beta = ggml_pad(ctx0, beta, 0, pad, 0, 0);
|
|
175
|
-
|
|
176
|
-
cb(q, "q_pad", il);
|
|
177
|
-
cb(k, "k_pad", il);
|
|
178
|
-
cb(v, "v_pad", il);
|
|
179
|
-
cb(beta, "beta_pad", il);
|
|
180
|
-
cb(g, "g_pad", il);
|
|
181
|
-
|
|
182
|
-
ggml_tensor * v_beta = ggml_mul(ctx0, v, beta);
|
|
183
|
-
ggml_tensor * k_beta = ggml_mul(ctx0, k, beta);
|
|
184
|
-
|
|
185
|
-
cb(v_beta, "v_beta", il);
|
|
186
|
-
cb(k_beta, "k_beta", il);
|
|
187
|
-
|
|
188
|
-
q = ggml_reshape_4d(ctx0, q, S_k, chunk_size, n_chunks, H_k * n_seqs);
|
|
189
|
-
k = ggml_reshape_4d(ctx0, k, S_k, chunk_size, n_chunks, H_k * n_seqs);
|
|
190
|
-
k_beta = ggml_reshape_4d(ctx0, k_beta, S_k, chunk_size, n_chunks, H_k * n_seqs);
|
|
191
|
-
v = ggml_reshape_4d(ctx0, v, S_v, chunk_size, n_chunks, H_v * n_seqs);
|
|
192
|
-
v_beta = ggml_reshape_4d(ctx0, v_beta, S_v, chunk_size, n_chunks, H_v * n_seqs);
|
|
193
|
-
|
|
194
|
-
g = ggml_reshape_4d(ctx0, g, chunk_size, 1, n_chunks, H_k * n_seqs);
|
|
195
|
-
beta = ggml_reshape_4d(ctx0, beta, 1, chunk_size, n_chunks, H_k * n_seqs);
|
|
196
|
-
|
|
197
|
-
ggml_tensor * g_cumsum = ggml_cumsum(ctx0, g);
|
|
198
|
-
cb(g_cumsum, "g_cumsum", il); // shape: (chunk_size, 1, n_chunks, H_v * n_seqs)
|
|
199
|
-
|
|
200
|
-
ggml_tensor * gcs_i = g_cumsum; // ggml_reshape_4d(ctx0, g_cumsum, chunk_size, 1, n_chunks, H_v * n_seqs);
|
|
201
|
-
ggml_tensor * gcs_j = ggml_reshape_4d(ctx0, g_cumsum, 1, chunk_size, n_chunks, H_v * n_seqs);
|
|
202
|
-
|
|
203
|
-
ggml_tensor * gcs_j_broadcast =
|
|
204
|
-
ggml_repeat_4d(ctx0, gcs_j, chunk_size, chunk_size, n_chunks, H_v * n_seqs);
|
|
205
|
-
|
|
206
|
-
ggml_tensor * decay_mask = ggml_sub(ctx0, gcs_j_broadcast, gcs_i);
|
|
207
|
-
cb(decay_mask, "decay_mask", il); // shape: (chunk_size, chunk_size, n_chunks, H_v * n_seqs)
|
|
208
|
-
|
|
209
|
-
decay_mask = ggml_mul(ctx0, decay_mask, diag_mask);
|
|
210
|
-
decay_mask = ggml_exp(ctx0, decay_mask);
|
|
211
|
-
decay_mask = ggml_mul(ctx0, decay_mask, diag_mask);
|
|
212
|
-
|
|
213
|
-
ggml_tensor * kmulkbeta = ggml_mul_mat(ctx0, k, k_beta);
|
|
214
|
-
|
|
215
|
-
ggml_tensor * k_decay = ggml_mul(ctx0, kmulkbeta, decay_mask);
|
|
216
|
-
ggml_tensor * attn = ggml_neg(ctx0, ggml_mul(ctx0, k_decay, causal_mask));
|
|
217
|
-
cb(attn, "attn_pre_solve", il); // shape: (chunk_size, chunk_size, n_chunks, H_v * n_seqs)
|
|
218
|
-
|
|
219
|
-
ggml_tensor * attn_lower = ggml_mul(ctx0, attn, causal_mask);
|
|
220
|
-
ggml_tensor * lhs = ggml_sub(ctx0, ggml_repeat(ctx0, identity, attn_lower), attn_lower);
|
|
221
|
-
|
|
222
|
-
ggml_tensor * lin_solve = ggml_solve_tri(ctx0, lhs, attn, true, true, false);
|
|
223
|
-
attn = ggml_mul(ctx0, lin_solve, causal_mask);
|
|
224
|
-
attn = ggml_add(ctx0, attn, identity);
|
|
225
|
-
cb(attn, "attn_solved", il); // shape: (chunk_size, chunk_size, n_chunks, H_v * n_seqs)
|
|
226
|
-
|
|
227
|
-
v = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, v_beta)), attn);
|
|
228
|
-
|
|
229
|
-
ggml_tensor * g_cumsum_t = ggml_cont(ctx0, ggml_transpose(ctx0, g_cumsum));
|
|
230
|
-
ggml_tensor * gexp = ggml_exp(ctx0, g_cumsum_t);
|
|
231
|
-
|
|
232
|
-
ggml_tensor * kbeta_gexp = ggml_mul(ctx0, k_beta, gexp);
|
|
233
|
-
cb(kbeta_gexp, "kbeta_gexp", il); // shape: (S_k, chunk_size, n_chunks, H_v * n_seqs)
|
|
234
|
-
|
|
235
|
-
ggml_tensor * k_cumdecay =
|
|
236
|
-
ggml_cont(ctx0, ggml_transpose(ctx0, ggml_mul_mat(ctx0, attn, ggml_cont(ctx0, ggml_transpose(ctx0, kbeta_gexp)))));
|
|
237
|
-
cb(k_cumdecay, "k_cumdecay", il); // shape: (chunk_size, chunk_size, n_chunks, H_v * n_seqs)
|
|
238
|
-
|
|
239
|
-
ggml_tensor * attn_kq = ggml_mul_mat(ctx0, k, q);
|
|
240
|
-
attn_kq = ggml_mul(ctx0, attn_kq, decay_mask);
|
|
241
|
-
attn_kq = ggml_mul(ctx0, attn_kq, diag_mask);
|
|
242
|
-
cb(attn_kq, "attn_kq", il); // shape: (chunk_size, chunk_size, n_chunks, H_v * n_seqs)
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
// vectorized calculation of key_gdiff
|
|
246
|
-
// improved from the chunked version:
|
|
247
|
-
// g_last = torch.clamp(g_cum[:, :, -1], max=50.0).exp().unsqueeze(-1).unsqueeze(-1)
|
|
248
|
-
// g_diff = torch.clamp(g_cum[:, :, -1:] - g_cum, max=50.0).exp()
|
|
249
|
-
// key_gdiff = key * g_diff.unsqueeze(-1)
|
|
250
|
-
// kgdmulvnew = (key_gdiff).transpose(-1, -2) @ v_new
|
|
251
|
-
// last_recurrent_state = last_recurrent_state * g_last + kgdmulvnew
|
|
252
|
-
|
|
253
|
-
// get last element in g_cumsum along chunk_size dimension (ne0)
|
|
254
|
-
// example: [[x, y, z, ..., last], ...] -> [[last], ...]
|
|
255
|
-
ggml_tensor * g_last = ggml_view_4d(ctx0, g_cumsum, 1, 1, g_cumsum->ne[2], g_cumsum->ne[3],
|
|
256
|
-
g_cumsum->nb[1], g_cumsum->nb[2], g_cumsum->nb[3],
|
|
257
|
-
(g_cumsum->ne[0] - 1) * ggml_element_size(g_cumsum));
|
|
258
|
-
g_last = ggml_cont(ctx0, g_last);
|
|
259
|
-
cb(g_last, "g_last", il); // shape: (1, 1, n_chunks, H_v * n_seqs)
|
|
260
|
-
|
|
261
|
-
ggml_tensor * g_last_exp = ggml_exp(ctx0, g_last);
|
|
262
|
-
cb(g_last_exp, "g_last_exp", il); // shape: (1, 1, n_chunks, H_v * n_seqs)
|
|
263
|
-
|
|
264
|
-
ggml_tensor * g_diff = ggml_neg(ctx0, ggml_sub(ctx0, g_cumsum, g_last));
|
|
265
|
-
cb(g_diff, "g_diff", il); // shape: (chunk_size, 1, n_chunks, H_v * n_seqs)
|
|
266
|
-
|
|
267
|
-
ggml_tensor * g_diff_exp = ggml_exp(ctx0, g_diff);
|
|
268
|
-
ggml_tensor * key_gdiff = ggml_mul(ctx0, k, g_diff_exp);
|
|
269
|
-
cb(key_gdiff, "key_gdiff", il); // shape: (S_k, chunk_size, n_chunks, H_v * n_seqs)
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
// state to be updated per chunk
|
|
273
|
-
ggml_tensor * new_state = state; // ggml_dup(ctx0, state);
|
|
274
|
-
cb(new_state, "new_state", il); // shape: (S_v, S_v, H_v, n_seqs)
|
|
275
|
-
|
|
276
|
-
// shape after loop of chunks: (S_v, chunk_size, n_chunks, H_v * n_seqs)
|
|
277
|
-
ggml_tensor * core_attn_out = nullptr;
|
|
278
|
-
|
|
279
|
-
for (int64_t chunk = 0; chunk < n_chunks; chunk++) {
|
|
280
|
-
- // shape: (S_k, chunk_size, 1, H_k * n_seqs)
- ggml_tensor * q_chunk = get_slice_2d(ctx0, q, chunk); // (no cont), next op: ggml_mul
-
- // shape: (S_v, chunk_size, 1, H_v * n_seqs)
- ggml_tensor * v_chunk = get_slice_2d(ctx0, v, chunk); // (no cont), next op: ggml_repeat
-
- // shape: (chunk_size, 1, n_chunks, H_v * n_seqs)
- ggml_tensor * gexp_chunk = get_slice_2d(ctx0, gexp, chunk); // (no cont), next op: ggml_mul
-
- // shape: (chunk_size, 1, H_v * n_seqs)
- ggml_tensor * k_cumdecay_chunk = get_slice_2d(ctx0, k_cumdecay, chunk); // (no cont), next op: ggml_mul_mat
-
- // attn = (q_i @ k_i.transpose(-1, -2) * decay_mask[:, :, i]).masked_fill_(mask, 0)
- // replaced by precomputed attn_kq
- ggml_tensor * attn_chunk = get_slice_2d(ctx0, attn_kq, chunk);
- cb(attn_chunk, "attn_chunk", il);
-
- ggml_tensor * state_t = ggml_cont_4d(ctx0, ggml_permute(ctx0, new_state, 1, 0, 2, 3), S_v, S_v, 1, H_v * n_seqs);
-
- // v_prime = (k_cumdecay[:, :, i]) @ last_recurrent_state
- ggml_tensor * v_prime = ggml_mul_mat(ctx0, state_t, k_cumdecay_chunk);
- cb(v_prime, "v_prime_chunk", il); // shape: (S_v, 1, H_v * n_seqs)
-
- // v_new = v_i - v_prime
- ggml_tensor * v_new = ggml_sub(ctx0, ggml_repeat(ctx0, v_chunk, v_prime), v_prime);
- ggml_tensor * v_new_t = ggml_cont(ctx0, ggml_transpose(ctx0, v_new));
- cb(v_new, "v_new_chunk", il);
-
- // attn_inter = (q_i * g[:, :, i, :, None].exp()) @ last_recurrent_state
- ggml_tensor * q_g_exp = ggml_mul(ctx0, q_chunk, gexp_chunk);
- ggml_tensor * attn_inter = ggml_mul_mat(ctx0, state_t, q_g_exp);
- cb(attn_inter, "attn_inter_chunk", il);
-
- // core_attn_out[:, :, i] = attn_inter + attn @ v_new
- ggml_tensor * v_attn = ggml_mul_mat(ctx0, v_new_t, attn_chunk);
- cb(v_attn, "v_attn_chunk", il);
-
- ggml_tensor * core_attn_out_chunk = ggml_add(ctx0, attn_inter, v_attn);
- cb(core_attn_out_chunk, "core_attn_out_chunk", il); // shape: (S_v, chunk_size, 1, H_v * n_seqs)
-
- core_attn_out = core_attn_out == nullptr
-     ? core_attn_out_chunk
-     : ggml_concat(ctx0, core_attn_out, core_attn_out_chunk, 2);
-
- // kgdmulvnew = (key_gdiff).transpose(-1, -2) @ v_new
- ggml_tensor * k_gdiff = ggml_cont(ctx0, get_slice_2d(ctx0, key_gdiff, chunk));
- //ggml_tensor * kgdmulvnew = ggml_mul_mat(ctx0, k_gdiff, v_new); // this is slower on metal, why?
- ggml_tensor * kgdmulvnew = ggml_mul_mat(ctx0, v_new_t, ggml_cont(ctx0, ggml_transpose(ctx0, k_gdiff)));
-
- // last_recurrent_state = last_recurrent_state * g_last + kgdmulvnew
- ggml_tensor * gexp_last_chunk = ggml_cont(ctx0, get_slice_2d(ctx0, g_last_exp, chunk));
- new_state = ggml_add(ctx0,
-     ggml_mul(ctx0, new_state, ggml_reshape_4d(ctx0, gexp_last_chunk, gexp_last_chunk->ne[0], gexp_last_chunk->ne[1], H_v, n_seqs)),
-     ggml_reshape_4d(ctx0, kgdmulvnew, kgdmulvnew->ne[0], kgdmulvnew->ne[1], H_v, n_seqs));
- }
-
- // truncate padded tokens
- ggml_tensor * output_tokens = ggml_view_4d(ctx0, core_attn_out,
-     S_v, n_tokens, H_v, n_seqs,
-     ggml_row_size(core_attn_out->type, S_v),
-     ggml_row_size(core_attn_out->type, S_v * chunk_size * n_chunks),
-     ggml_row_size(core_attn_out->type, S_v * chunk_size * n_chunks * H_v), 0);
- output_tokens = ggml_cont(ctx0, output_tokens);
- cb(output_tokens, "output_tokens", il);
-
- // permute back to (S_v, H_v, n_tokens, n_seqs)
- output_tokens = ggml_permute(ctx0, output_tokens, 0, 2, 1, 3);
- output_tokens = ggml_cont(ctx0, output_tokens);
-
- return {output_tokens, new_state};
- }
-
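For orientation, the chunked delta-net path removed above advances the recurrent state one chunk at a time, using per-chunk tensors (attn_kq, k_cumdecay, key_gdiff, gexp, g_last_exp) that the removed code precomputes earlier. Below is a minimal sketch of that per-chunk recurrence for a single head with naive dense matrices, assuming those inputs are already available; the Mat type and all names here are illustrative only, not part of this gem or of whisper.cpp.

    #include <vector>

    struct Mat {                       // row-major rows x cols
        int rows = 0, cols = 0;
        std::vector<float> d;
        Mat(int r, int c) : rows(r), cols(c), d(r * c, 0.0f) {}
        float & at(int r, int c)       { return d[r * cols + c]; }
        float   at(int r, int c) const { return d[r * cols + c]; }
    };

    static Mat matmul(const Mat & a, const Mat & b) {   // (r x k) * (k x c)
        Mat out(a.rows, b.cols);
        for (int i = 0; i < a.rows; ++i)
            for (int k = 0; k < a.cols; ++k)
                for (int j = 0; j < b.cols; ++j)
                    out.at(i, j) += a.at(i, k) * b.at(k, j);
        return out;
    }

    // state:      d_k x d_v          q, k_cumdecay, key_gdiff: chunk x d_k
    // v:          chunk x d_v        attn: chunk x chunk (masked decay attention, precomputed)
    // g_exp:      per-token decay factors for this chunk    g_last: decay for the whole chunk
    Mat delta_net_chunk(Mat & state, const Mat & q, const Mat & v,
                        const Mat & attn, const Mat & k_cumdecay,
                        const Mat & key_gdiff, const std::vector<float> & g_exp, float g_last) {
        // v_prime = k_cumdecay @ state;  v_new = v - v_prime
        Mat v_new = matmul(k_cumdecay, state);
        for (int i = 0; i < v_new.rows * v_new.cols; ++i) v_new.d[i] = v.d[i] - v_new.d[i];

        // attn_inter = (q * g_exp) @ state   (decayed read of the incoming state)
        Mat qg = q;
        for (int i = 0; i < q.rows; ++i)
            for (int j = 0; j < q.cols; ++j) qg.at(i, j) *= g_exp[i];
        Mat out = matmul(qg, state);

        // out += attn @ v_new                (intra-chunk contribution)
        Mat av = matmul(attn, v_new);
        for (int i = 0; i < out.rows * out.cols; ++i) out.d[i] += av.d[i];

        // state = state * g_last + key_gdiff^T @ v_new
        Mat kgd_t(key_gdiff.cols, key_gdiff.rows);
        for (int i = 0; i < key_gdiff.rows; ++i)
            for (int j = 0; j < key_gdiff.cols; ++j) kgd_t.at(j, i) = key_gdiff.at(i, j);
        Mat upd = matmul(kgd_t, v_new);
        for (int i = 0; i < state.rows * state.cols; ++i) state.d[i] = state.d[i] * g_last + upd.d[i];

        return out;   // chunk x d_v
    }

A caller would concatenate the returned chunk outputs along the token dimension, which is what the ggml_concat in the removed loop does before the padded tokens are truncated.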
- std::pair<ggml_tensor *, ggml_tensor *> llm_build_qwen3next::build_delta_net_autoregressive(
-     ggml_tensor * q,
-     ggml_tensor * k,
-     ggml_tensor * v,
-     ggml_tensor * g,
-     ggml_tensor * beta,
-     ggml_tensor * state,
-     int il) {
- const int64_t S_k = q->ne[0];
- const int64_t H_k = q->ne[1];
- const int64_t n_tokens = q->ne[2];
- const int64_t n_seqs = q->ne[3];
-
- const int64_t S_v = v->ne[0];
- const int64_t H_v = v->ne[1];
-
- GGML_ASSERT(n_tokens == 1); // This function is optimized for single token processing
- GGML_ASSERT(v->ne[2] == n_tokens);
- GGML_ASSERT(k->ne[2] == n_tokens);
- GGML_ASSERT(g->ne[0] == H_v && g->ne[1] == n_tokens && g->ne[2] == n_seqs);
- GGML_ASSERT(beta->ne[0] == H_v && beta->ne[2] == n_tokens && beta->ne[3] == n_seqs);
- GGML_ASSERT(state->ne[0] == S_v && state->ne[1] == S_v * H_v && state->ne[2] == 1 && state->ne[3] == n_seqs);
-
- GGML_ASSERT(q->ne[0] == S_k && q->ne[1] == H_k && q->ne[2] == n_tokens && q->ne[3] == n_seqs);
- GGML_ASSERT(k->ne[0] == S_k && k->ne[1] == H_k && k->ne[2] == n_tokens && k->ne[3] == n_seqs);
-
- GGML_ASSERT(H_k == H_v); // we did a repeat to make sure this is the case
-
- const float eps_norm = hparams.f_norm_rms_eps;
-
- q = ggml_l2_norm(ctx0, q, eps_norm);
- k = ggml_l2_norm(ctx0, k, eps_norm);
-
- const float scale = 1.0f / sqrtf(S_v);
-
- q = ggml_scale(ctx0, q, scale);
- beta = ggml_sigmoid(ctx0, beta);
-
- cb(q, "q_in", il);
- cb(k, "k_in", il);
- cb(v, "v_in", il);
- cb(beta, "beta_in", il);
- cb(g, "g_in", il);
-
- state = ggml_reshape_4d(ctx0, state, S_v, S_v, H_v, n_seqs);
-
- ggml_tensor * g_t = ggml_reshape_4d(ctx0, ggml_transpose(ctx0, g), 1, 1, H_k, n_seqs);
- ggml_tensor * beta_t = ggml_reshape_4d(ctx0, ggml_transpose(ctx0, beta), 1, 1, H_k, n_seqs);
-
- // Apply exponential to g_t
- g_t = ggml_exp(ctx0, g_t);
-
- // Apply the gated delta rule for the single timestep
- // last_recurrent_state = last_recurrent_state * g_t
- state = ggml_mul(ctx0, state, g_t);
-
- // kv_mem = (last_recurrent_state * k_t.unsqueeze(-1)).sum(dim=-2)
- ggml_tensor * k_t_unsqueezed = ggml_reshape_4d(ctx0, k, 1, S_v, H_v, n_seqs);
- ggml_tensor * kv_mem = ggml_mul(ctx0, state, k_t_unsqueezed);
- // we need to sum over dim=-2, so we transpose, sum, then transpose again
- kv_mem = ggml_transpose(ctx0, ggml_sum_rows(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, kv_mem))));
-
- // v_t = v.unsqueeze(2) (we insert the singleton dimension after n_seqs and H_v)
- ggml_tensor * v_t = ggml_reshape_4d(ctx0, v, S_v, 1, H_v, n_seqs);
- // delta = (v_t - kv_mem) * beta_t
- ggml_tensor * v_diff = ggml_sub(ctx0, v_t, kv_mem); // both should be [S_v, 1, H_v, n_seqs]
- ggml_tensor * delta = ggml_mul(ctx0, v_diff, beta_t);
-
- // last_recurrent_state = last_recurrent_state + k_t.unsqueeze(-1) * delta
- ggml_tensor * k_t_delta = ggml_mul(ctx0, ggml_repeat_4d(ctx0, k_t_unsqueezed, S_v, S_v, H_v, n_seqs), delta);
- state = ggml_add(ctx0, state, k_t_delta);
-
- // Compute the attention output
- // core_attn_out = (last_recurrent_state * q_t.unsqueeze(-1)).sum(dim=-2)
- ggml_tensor * q_t_unsqueezed = ggml_reshape_4d(ctx0, q, 1, S_v, H_v, n_seqs); // unsqueeze q_t
- ggml_tensor * state_q = ggml_mul(ctx0, state, q_t_unsqueezed);
- // again, since it's over dim = -2, transpose, sum, transpose back
- ggml_tensor * core_attn_out =
-     ggml_transpose(ctx0, ggml_sum_rows(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, state_q))));
-
- // core_attn_out should be [S_v, 1, H_v, n_seqs] after this
- cb(core_attn_out, "output_tokens", il);
- cb(state, "new_state", il);
-
- return {core_attn_out, state};
- }
-
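The removed build_delta_net_autoregressive is the single-token form of the same gated delta rule: decay the state, apply a rank-1 key/value correction scaled by beta, then read the state out with the query. A minimal per-head sketch with plain vectors, assuming q has already been L2-normalized and scaled and beta already passed through the sigmoid, as in the removed code; the function name and layout are illustrative only.

    #include <cmath>
    #include <vector>

    // state: d_k x d_v matrix stored row-major (state[i * d_v + j]).
    // Returns the attention output of length d_v and updates state in place.
    std::vector<float> gated_delta_rule_step(std::vector<float> & state,
                                             const std::vector<float> & q,   // d_k
                                             const std::vector<float> & k,   // d_k
                                             const std::vector<float> & v,   // d_v
                                             float g,                        // log-decay for this head
                                             float beta) {                   // write strength in (0, 1)
        const size_t d_k = q.size(), d_v = v.size();
        const float decay = std::exp(g);

        // last_recurrent_state = last_recurrent_state * g.exp()
        for (float & s : state) s *= decay;

        // kv_mem = k^T S   and   delta = (v - kv_mem) * beta
        std::vector<float> delta(d_v);
        for (size_t j = 0; j < d_v; ++j) {
            float kv_mem = 0.0f;
            for (size_t i = 0; i < d_k; ++i) kv_mem += state[i * d_v + j] * k[i];
            delta[j] = (v[j] - kv_mem) * beta;
        }

        // S += k (outer) delta  (rank-1 update)
        for (size_t i = 0; i < d_k; ++i)
            for (size_t j = 0; j < d_v; ++j)
                state[i * d_v + j] += k[i] * delta[j];

        // out = q^T S
        std::vector<float> out(d_v, 0.0f);
        for (size_t j = 0; j < d_v; ++j)
            for (size_t i = 0; i < d_k; ++i)
                out[j] += state[i * d_v + j] * q[i];
        return out;
    }

Running this step once per token over a chunk should, under those assumptions, match what the chunked path computes blockwise.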
  ggml_tensor * llm_build_qwen3next::build_norm_gated(
      ggml_tensor * input,
      ggml_tensor * weights,
@@ -452,8 +100,8 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn(
      ggml_tensor * cur,
      ggml_tensor * inp_pos,
      int il) {
- const int64_t n_embd_head = hparams.n_embd_head_v;
- GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+ const int64_t n_embd_head = hparams.n_embd_head_v();
+ GGML_ASSERT(n_embd_head == hparams.n_embd_head_k());

  // Order: joint QG projection, QG split, Q norm, KV projection, K norm, RoPE, attention

@@ -466,39 +114,29 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn(
  // Split Q projection into query and gate
  // The split should be along dimension 0 (the feature dimension)
  ggml_tensor * Qcur = ggml_view_4d(ctx0, Qcur_full, n_embd_head, n_head, n_tokens, 1,
-
+     Qcur_full->nb[1], Qcur_full->nb[2], Qcur_full->nb[3], 0);
+ cb(Qcur, "Qcur_view", il);
+
  ggml_tensor * gate =
      ggml_view_4d(ctx0, Qcur_full, n_embd_head, n_head, n_tokens, 1,
          Qcur_full->nb[1], Qcur_full->nb[2], Qcur_full->nb[3], n_embd_head * ggml_element_size(Qcur_full));
- cb(Qcur, "Qcur", il);
  cb(gate, "gate", il);

- // Now reshape Qcur to [n_embd_head, n_head, n_tokens] for multi-head attention
- Qcur = ggml_cont_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- cb(Qcur, "Qcur_reshaped", il);
-
- // Apply Q normalization
- Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, nullptr, LLM_NORM_RMS, il);
- cb(Qcur, "Qcur_normed", il);
-
  ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
  cb(Kcur, "Kcur", il);

  ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
  cb(Vcur, "Vcur", il);

- // Apply K normalization
  Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
-
- cb(Kcur, "Kcur_normed", il);
+ Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);

-
-
- cb(gate, "gate_reshaped", il);
+ Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, nullptr, LLM_NORM_RMS, il);
+ cb(Qcur, "Qcur_normed", il);

-
+ Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, nullptr, LLM_NORM_RMS, il);
+ cb(Kcur, "Kcur_normed", il);

- // Apply RoPE
  Qcur = ggml_rope_ext(
      ctx0, Qcur, inp_pos, nullptr,
      n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
@@ -513,7 +151,6 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn(
  cb(Kcur, "Kcur", il);
  cb(Vcur, "Vcur", il);

- // Attention computation
  const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f / sqrtf(float(n_embd_head)) : hparams.f_attention_scale;

  cur = build_attn(inp,
@@ -521,10 +158,15 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn(
      Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
  cb(cur, "attn_pregate", il);

-
-
+ // TODO: CUDA is missing non-contiguous unary ops. when implemented: remove this cont
+ gate = ggml_cont_2d(ctx0, gate, n_embd_head * n_head, n_tokens);
+
+ gate = ggml_sigmoid(ctx0, gate);
+ cb(gate, "gate_sigmoid", il);

-
+ gate = ggml_reshape_2d(ctx0, gate, n_embd_head * n_head, n_tokens);
+
+ cur = ggml_mul(ctx0, cur, gate);
  cb(cur, "attn_gated", il);

  cur = build_lora_mm(model.layers[il].wo, cur);
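In the rewritten build_layer_attn above, the joint QG projection is split per head into a query half and a gate half, and after attention the output is multiplied by the sigmoid of that gate before the wo projection. A minimal per-token sketch of that split and gating with plain vectors, mirroring the two ggml_view_4d views in the hunk above; the layout and names are illustrative only.

    #include <cmath>
    #include <vector>

    // attn_out: n_head * n_embd_head values for one token
    // qg_full:  n_head * 2 * n_embd_head values, each head laid out as [query | gate]
    std::vector<float> gate_attn_output(const std::vector<float> & attn_out,
                                        const std::vector<float> & qg_full,
                                        int n_head, int n_embd_head) {
        std::vector<float> gated(attn_out.size());
        for (int h = 0; h < n_head; ++h) {
            for (int i = 0; i < n_embd_head; ++i) {
                // the second half of each head's block is the gate: sigmoid, then scale
                const float g = qg_full[h * 2 * n_embd_head + n_embd_head + i];
                const float s = 1.0f / (1.0f + std::exp(-g));
                gated[h * n_embd_head + i] = attn_out[h * n_embd_head + i] * s;
            }
        }
        return gated;  // this gated result is what the wo projection then consumes
    }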
@@ -554,7 +196,6 @@ std::pair<ggml_tensor *, ggml_tensor *> llm_build_qwen3next::build_qkvz(
  cb(z, "z", il);

  return { qkv_mixed, z };
-
  } else {
      // legacy (slower) path
      ggml_tensor * mixed_qkvz = build_lora_mm(model.layers[il].ssm_in, input);
@@ -618,9 +259,6 @@ std::pair<ggml_tensor *, ggml_tensor *> llm_build_qwen3next::build_qkvz(
  ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
      llm_graph_input_rs * inp,
      ggml_tensor * cur,
-     ggml_tensor * causal_mask,
-     ggml_tensor * identity,
-     ggml_tensor * diag_mask,
      int il) {
  const auto * mctx_cur = inp->mctx;

@@ -665,7 +303,10 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
      split_sizes_ba[0] * ggml_element_size(mixed_ba_reshaped));
  cb(a, "a", il);

-
+ // TODO: CUDA is missing non-contiguous unary ops. when implemented: remove this cont
+ b = ggml_cont(ctx0, b);
+
+ ggml_tensor * beta = ggml_sigmoid(ctx0, b);

  // Reshape a to merge head dimensions: [batch, seq_len, num_k_heads, num_v_heads/num_k_heads] -> [batch, seq_len, num_v_heads]
  ggml_tensor * alpha = ggml_cont_3d(ctx0, a, num_v_heads, n_seq_tokens, n_seqs);
@@ -673,15 +314,17 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
  ggml_tensor * alpha_biased = ggml_add(ctx0, alpha, model.layers[il].ssm_dt);
  ggml_tensor * alpha_softplus = ggml_softplus(ctx0, alpha_biased);
  cb(alpha_softplus, "a_softplus", il);
+
  ggml_tensor * gate = ggml_mul(ctx0, alpha_softplus, model.layers[il].ssm_a); // -A_log.exp() * softplus
  cb(gate, "gate", il);

+ beta = ggml_reshape_4d(ctx0, beta, 1, num_v_heads, n_seq_tokens, n_seqs);
+ gate = ggml_reshape_4d(ctx0, gate, 1, num_v_heads, n_seq_tokens, n_seqs);
+
  // Get convolution states from cache
  ggml_tensor * conv_states_all = mctx_cur->get_r_l(il);
  ggml_tensor * ssm_states_all = mctx_cur->get_s_l(il);

- // bool use_precomputed_states = n_seq_tokens == 1 && mctx_cur->has_previous_state();
-
  // Build the convolution states tensor
  ggml_tensor * conv_states = build_rs(inp, conv_states_all, hparams.n_embd_r(), n_seqs);
  cb(conv_states, "conv_states", il);
@@ -690,11 +333,12 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
  ggml_tensor * conv_kernel = model.layers[il].ssm_conv1d;
  const int64_t conv_kernel_size = conv_kernel->ne[0];
  const int64_t conv_channels = d_inner + 2 * hparams.ssm_n_group * hparams.ssm_d_state;
-
+
+ conv_states = ggml_reshape_3d(ctx0, conv_states, conv_kernel_size - 1, conv_channels, n_seqs);
  cb(conv_states, "conv_states_reshaped", il);

- qkv_mixed =
- cb(qkv_mixed, "
+ qkv_mixed = ggml_transpose(ctx0, qkv_mixed);
+ cb(qkv_mixed, "qkv_mixed_transposed", il);

  ggml_tensor * conv_input = ggml_concat(ctx0, conv_states, qkv_mixed, 0);
  cb(conv_input, "conv_input", il);
@@ -712,9 +356,11 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
  cb(state_update_target, "state_update_target", il);

  ggml_build_forward_expand(gf, ggml_cpy(ctx0, last_conv_states, state_update_target));
- cb(conv_states_all, "conv_states_updated", il);

-
+ ggml_tensor * state = build_rs(inp, ssm_states_all, hparams.n_embd_s(), n_seqs);
+ state = ggml_reshape_4d(ctx0, state, head_v_dim, head_v_dim, num_v_heads, n_seqs);
+ cb(state, "state_predelta", il);
+
  ggml_tensor * conv_output_proper = ggml_ssm_conv(ctx0, conv_input, conv_kernel);
  cb(conv_output_proper, "conv_output_raw", il);

@@ -728,28 +374,39 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
  int64_t nb1_qkv = ggml_row_size(conv_qkv_mix->type, qkv_dim);

  // Extract the convolved Q, K, V from conv_output
- ggml_tensor * q_conv =
-
+ ggml_tensor * q_conv = ggml_view_4d(ctx0, conv_qkv_mix, head_k_dim, num_k_heads, n_seq_tokens, n_seqs,
+     ggml_row_size(conv_qkv_mix->type, head_k_dim),
+     nb1_qkv,
+     nb1_qkv * n_seq_tokens,
+     0);
+
+ ggml_tensor * k_conv = ggml_view_4d(ctx0, conv_qkv_mix, head_k_dim, num_k_heads, n_seq_tokens, n_seqs,
+     ggml_row_size(conv_qkv_mix->type, head_k_dim),
+     nb1_qkv,
+     nb1_qkv * n_seq_tokens,
+     head_k_dim * num_k_heads * ggml_element_size(conv_qkv_mix));
+
+ ggml_tensor * v_conv = ggml_view_4d(ctx0, conv_qkv_mix, head_v_dim, num_v_heads, n_seq_tokens, n_seqs,
+     ggml_row_size(conv_qkv_mix->type, head_v_dim),
+     nb1_qkv,
+     nb1_qkv * n_seq_tokens,
+     ggml_row_size(conv_qkv_mix->type, 2 * head_k_dim * num_k_heads));
+
  cb(q_conv, "q_conv", il);
- ggml_tensor * k_conv =
-     ggml_view_2d(ctx0, conv_qkv_mix, head_k_dim * num_k_heads, n_seq_tokens * n_seqs, nb1_qkv,
-         head_k_dim * num_k_heads * ggml_element_size(conv_qkv_mix));
  cb(k_conv, "k_conv", il);
- ggml_tensor * v_conv =
-     ggml_view_2d(ctx0, conv_qkv_mix, head_v_dim * num_v_heads, n_seq_tokens * n_seqs, nb1_qkv,
-         2 * head_k_dim * num_k_heads * ggml_element_size(conv_qkv_mix));
  cb(v_conv, "v_conv", il);

-
- q_conv = ggml_cont_4d(ctx0, q_conv, head_k_dim, num_k_heads, n_seq_tokens, n_seqs);
- k_conv = ggml_cont_4d(ctx0, k_conv, head_k_dim, num_k_heads, n_seq_tokens, n_seqs);
- v_conv = ggml_cont_4d(ctx0, v_conv, head_v_dim, num_v_heads, n_seq_tokens, n_seqs);
+ const float eps_norm = hparams.f_norm_rms_eps;

-
-
-
+ q_conv = ggml_l2_norm(ctx0, q_conv, eps_norm);
+ k_conv = ggml_l2_norm(ctx0, k_conv, eps_norm);
+
+ //q_conv = ggml_cont_4d(ctx0, q_conv, head_k_dim, num_k_heads, n_seq_tokens, n_seqs);
+ //k_conv = ggml_cont_4d(ctx0, k_conv, head_k_dim, num_k_heads, n_seq_tokens, n_seqs);
+ //v_conv = ggml_cont_4d(ctx0, v_conv, head_v_dim, num_v_heads, n_seq_tokens, n_seqs);

  // if head keys and value keys are different, repeat to force tensors into matching shapes
+ // TODO: avoid repeats for fused GDN, needs broadcast configuration for GDN op [TAG_GGML_GDN_BCAST]
  if (num_k_heads != num_v_heads) {
      GGML_ASSERT(num_v_heads % num_k_heads == 0);
      int64_t repeat_factor = num_v_heads / num_k_heads;
@@ -775,13 +432,8 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
  cb(k_conv, "k_conv_predelta", il);
  cb(v_conv, "v_conv_predelta", il);

-
-
- if (n_seq_tokens == 1) {
-     attn_out = build_delta_net_autoregressive(q_conv, k_conv, v_conv, gate, beta, state, il);
- } else {
-     attn_out = build_delta_net_chunking(q_conv, k_conv, v_conv, gate, beta, state, causal_mask, identity, diag_mask, il);
- }
+ auto attn_out = build_delta_net(q_conv, k_conv, v_conv, gate, beta, state, il);
+
  ggml_tensor * output = attn_out.first;
  ggml_tensor * new_state = attn_out.second;
  cb(output, "attn_output", il);
@@ -789,19 +441,15 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(

  // Update the recurrent states
  ggml_build_forward_expand(gf,
-
-
-
-
- // Reshape both attn_out_final and z to 2D tensors for normalization
- // attn_out_final: [head_dim, n_heads, n_tokens, n_seqs] -> [n_heads * n_tokens * n_seqs, head_dim]
- ggml_tensor * attn_out_2d_final = ggml_reshape_2d(ctx0, output, head_v_dim, num_v_heads * n_seq_tokens * n_seqs);
+     ggml_cpy(ctx0, new_state,
+         ggml_view_1d(ctx0, ssm_states_all, hparams.n_embd_s() * n_seqs,
+             kv_head * hparams.n_embd_s() * ggml_element_size(ssm_states_all))));

  // z: [head_dim, n_heads, n_tokens, n_seqs] -> [n_heads * n_tokens * n_seqs, head_dim]
- ggml_tensor * z_2d =
+ ggml_tensor * z_2d = ggml_reshape_4d(ctx0, z, head_v_dim, num_v_heads, n_seq_tokens, n_seqs);

  // Apply gated normalization: self.norm(core_attn_out, z)
- ggml_tensor * attn_out_norm = build_norm_gated(
+ ggml_tensor * attn_out_norm = build_norm_gated(output, model.layers[il].ssm_norm, z_2d, il);

  // Final reshape: [head_dim, n_heads, n_tokens, n_seqs] -> [n_tokens, n_seqs, n_heads * head_dim]
  ggml_tensor * final_output = ggml_reshape_3d(ctx0, attn_out_norm, head_v_dim * num_v_heads, n_seq_tokens, n_seqs);
@@ -812,7 +460,8 @@ ggml_tensor * llm_build_qwen3next::build_layer_attn_linear(
  cb(cur, "linear_attn_out", il);

  // Reshape back to original dimensions
- cur =
+ cur = ggml_reshape_2d(ctx0, cur, n_embd, n_seq_tokens * n_seqs);
+
  return cur;
  }

@@ -822,18 +471,23 @@ ggml_tensor * llm_build_qwen3next::build_layer_ffn(ggml_tensor * cur, const int
  // MoE branch
  ggml_tensor * moe_out =
      build_moe_ffn(cur,
-         model.layers[il].ffn_gate_inp,
-         model.layers[il].
+         model.layers[il].ffn_gate_inp,
+         model.layers[il].ffn_up_exps,
+         model.layers[il].ffn_gate_exps,
+         model.layers[il].ffn_down_exps,
          nullptr,
-         n_expert, n_expert_used,
-
+         n_expert, n_expert_used,
+         LLM_FFN_SILU, true,
+         hparams.expert_weights_scale,
+         LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, il,
+         nullptr, model.layers[il].ffn_gate_up_exps);
  cb(moe_out, "ffn_moe_out", il);

  // Add shared experts if present - following Qwen3Next reference implementation
  if (model.layers[il].ffn_up_shexp != nullptr) {
      ggml_tensor * ffn_shexp =
          build_ffn(cur,
-             model.layers[il].ffn_up_shexp,
+             model.layers[il].ffn_up_shexp, NULL, NULL,
              model.layers[il].ffn_gate_shexp, NULL, NULL,
              model.layers[il].ffn_down_shexp, NULL, NULL,
              NULL,
@@ -846,11 +500,9 @@ ggml_tensor * llm_build_qwen3next::build_layer_ffn(ggml_tensor * cur, const int
  ggml_tensor * shared_gate = build_lora_mm(model.layers[il].ffn_gate_inp_shexp, cur);
  cb(shared_gate, "shared_expert_gate", il);

- // Apply sigmoid to the gate
  shared_gate = ggml_sigmoid(ctx0, shared_gate);
  cb(shared_gate, "shared_expert_gate_sigmoid", il);

- // Apply the gate to the shared expert output
  ffn_shexp = ggml_mul(ctx0, ffn_shexp, shared_gate);
  cb(ffn_shexp, "ffn_shexp_gated", il);

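The shared-expert block at the end of build_layer_ffn gates the shared expert's FFN output with a per-token sigmoid gate; in the Qwen3Next reference formulation the gated result is then added to the routed MoE output, though that addition lies outside the hunk shown here and is treated as an assumption in this sketch. A minimal per-token sketch with plain vectors; names are illustrative only.

    #include <cmath>
    #include <vector>

    std::vector<float> combine_shared_expert(const std::vector<float> & moe_out,    // routed-expert output
                                             const std::vector<float> & shexp_out,  // shared-expert FFN output
                                             float shared_gate_logit) {             // from ffn_gate_inp_shexp
        const float g = 1.0f / (1.0f + std::exp(-shared_gate_logit));  // sigmoid gate
        std::vector<float> out(moe_out.size());
        for (size_t i = 0; i < out.size(); ++i) {
            // assumed combination: moe_out + sigmoid(gate) * ffn_shexp
            out[i] = moe_out[i] + g * shexp_out[i];
        }
        return out;
    }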