whispercpp 1.3.5 → 1.3.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/LICENSE +1 -1
- data/README.md +99 -2
- data/ext/extconf.rb +1 -0
- data/ext/ruby_whisper.c +20 -4
- data/ext/ruby_whisper.h +30 -2
- data/ext/ruby_whisper_context.c +216 -124
- data/ext/ruby_whisper_context_params.c +163 -0
- data/ext/ruby_whisper_model.c +0 -1
- data/ext/ruby_whisper_params.c +0 -1
- data/ext/ruby_whisper_segment.c +0 -1
- data/ext/ruby_whisper_token.c +29 -9
- data/ext/ruby_whisper_transcribe.cpp +4 -1
- data/ext/ruby_whisper_vad_context.c +48 -1
- data/ext/ruby_whisper_vad_context_detect.cpp +6 -5
- data/ext/ruby_whisper_vad_params.c +0 -1
- data/ext/ruby_whisper_vad_segment.c +0 -1
- data/ext/ruby_whisper_vad_segments.c +0 -1
- data/ext/sources/CMakeLists.txt +1 -1
- data/ext/sources/bindings/javascript/package.json +1 -1
- data/ext/sources/cmake/whisper-config.cmake.in +5 -40
- data/ext/sources/examples/bench/bench.cpp +23 -18
- data/ext/sources/examples/cli/cli.cpp +8 -0
- data/ext/sources/examples/common-ggml.cpp +2 -0
- data/ext/sources/examples/miniaudio.h +4507 -2131
- data/ext/sources/examples/server/server.cpp +18 -4
- data/ext/sources/examples/talk-llama/CMakeLists.txt +3 -2
- data/ext/sources/examples/talk-llama/llama-adapter.cpp +7 -13
- data/ext/sources/examples/talk-llama/llama-adapter.h +4 -3
- data/ext/sources/examples/talk-llama/llama-arch.cpp +335 -17
- data/ext/sources/examples/talk-llama/llama-arch.h +42 -0
- data/ext/sources/examples/talk-llama/llama-batch.cpp +3 -1
- data/ext/sources/examples/talk-llama/llama-chat.cpp +21 -1
- data/ext/sources/examples/talk-llama/llama-chat.h +1 -0
- data/ext/sources/examples/talk-llama/llama-context.cpp +508 -520
- data/ext/sources/examples/talk-llama/llama-context.h +27 -28
- data/ext/sources/examples/talk-llama/llama-cparams.h +5 -0
- data/ext/sources/examples/talk-llama/llama-ext.h +12 -0
- data/ext/sources/examples/talk-llama/llama-grammar.cpp +8 -8
- data/ext/sources/examples/talk-llama/llama-graph.cpp +583 -130
- data/ext/sources/examples/talk-llama/llama-graph.h +131 -10
- data/ext/sources/examples/talk-llama/llama-hparams.cpp +57 -40
- data/ext/sources/examples/talk-llama/llama-hparams.h +79 -10
- data/ext/sources/examples/talk-llama/llama-impl.cpp +4 -4
- data/ext/sources/examples/talk-llama/llama-impl.h +13 -1
- data/ext/sources/examples/talk-llama/llama-kv-cache-iswa.cpp +3 -1
- data/ext/sources/examples/talk-llama/llama-kv-cache.cpp +274 -89
- data/ext/sources/examples/talk-llama/llama-kv-cache.h +2 -3
- data/ext/sources/examples/talk-llama/llama-memory-hybrid-iswa.cpp +275 -0
- data/ext/sources/examples/talk-llama/llama-memory-hybrid-iswa.h +140 -0
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.cpp +11 -13
- data/ext/sources/examples/talk-llama/llama-mmap.cpp +28 -11
- data/ext/sources/examples/talk-llama/llama-model-loader.cpp +527 -119
- data/ext/sources/examples/talk-llama/llama-model-loader.h +35 -5
- data/ext/sources/examples/talk-llama/llama-model-saver.cpp +60 -46
- data/ext/sources/examples/talk-llama/llama-model-saver.h +5 -2
- data/ext/sources/examples/talk-llama/llama-model.cpp +1365 -647
- data/ext/sources/examples/talk-llama/llama-model.h +72 -19
- data/ext/sources/examples/talk-llama/llama-quant.cpp +578 -346
- data/ext/sources/examples/talk-llama/{llama-sampling.cpp → llama-sampler.cpp} +190 -76
- data/ext/sources/examples/talk-llama/{llama-sampling.h → llama-sampler.h} +0 -2
- data/ext/sources/examples/talk-llama/llama-vocab.cpp +118 -48
- data/ext/sources/examples/talk-llama/llama-vocab.h +5 -0
- data/ext/sources/examples/talk-llama/llama.cpp +76 -22
- data/ext/sources/examples/talk-llama/llama.h +63 -30
- data/ext/sources/examples/talk-llama/models/afmoe.cpp +2 -3
- data/ext/sources/examples/talk-llama/models/apertus.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/arcee.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/arctic.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/baichuan.cpp +4 -3
- data/ext/sources/examples/talk-llama/models/bailingmoe.cpp +1 -2
- data/ext/sources/examples/talk-llama/models/bailingmoe2.cpp +3 -5
- data/ext/sources/examples/talk-llama/models/bert.cpp +13 -7
- data/ext/sources/examples/talk-llama/models/bitnet.cpp +9 -24
- data/ext/sources/examples/talk-llama/models/bloom.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/chameleon.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/chatglm.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/codeshell.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/cogvlm.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/cohere2-iswa.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/command-r.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/dbrx.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/deci.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/deepseek.cpp +4 -6
- data/ext/sources/examples/talk-llama/models/deepseek2.cpp +24 -21
- data/ext/sources/examples/talk-llama/models/delta-net-base.cpp +445 -0
- data/ext/sources/examples/talk-llama/models/dots1.cpp +4 -6
- data/ext/sources/examples/talk-llama/models/dream.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/ernie4-5-moe.cpp +4 -6
- data/ext/sources/examples/talk-llama/models/ernie4-5.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/eurobert.cpp +97 -0
- data/ext/sources/examples/talk-llama/models/exaone-moe.cpp +145 -0
- data/ext/sources/examples/talk-llama/models/exaone.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/exaone4.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/falcon-h1.cpp +2 -4
- data/ext/sources/examples/talk-llama/models/falcon.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/gemma-embedding.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma2-iswa.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma3.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/gemma3n-iswa.cpp +7 -7
- data/ext/sources/examples/talk-llama/models/glm4-moe.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/glm4.cpp +14 -7
- data/ext/sources/examples/talk-llama/models/gpt2.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/gptneox.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/granite-hybrid.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/granite.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/grok.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/grovemoe.cpp +5 -7
- data/ext/sources/examples/talk-llama/models/hunyuan-dense.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/hunyuan-moe.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/internlm2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/jais.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/jais2.cpp +123 -0
- data/ext/sources/examples/talk-llama/models/jamba.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/kimi-linear.cpp +381 -0
- data/ext/sources/examples/talk-llama/models/lfm2.cpp +145 -124
- data/ext/sources/examples/talk-llama/models/llada-moe.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/llada.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/llama-iswa.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/llama.cpp +18 -11
- data/ext/sources/examples/talk-llama/models/maincoder.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/{graph-context-mamba.cpp → mamba-base.cpp} +9 -3
- data/ext/sources/examples/talk-llama/models/mamba.cpp +1 -2
- data/ext/sources/examples/talk-llama/models/mimo2-iswa.cpp +11 -5
- data/ext/sources/examples/talk-llama/models/minicpm3.cpp +14 -13
- data/ext/sources/examples/talk-llama/models/minimax-m2.cpp +4 -5
- data/ext/sources/examples/talk-llama/models/mistral3.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/models.h +181 -46
- data/ext/sources/examples/talk-llama/models/modern-bert.cpp +2 -9
- data/ext/sources/examples/talk-llama/models/mpt.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/nemotron-h.cpp +26 -14
- data/ext/sources/examples/talk-llama/models/nemotron.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/neo-bert.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/olmo.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/olmo2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/olmoe.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/openai-moe-iswa.cpp +1 -1
- data/ext/sources/examples/talk-llama/models/openelm.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/orion.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/paddleocr.cpp +122 -0
- data/ext/sources/examples/talk-llama/models/pangu-embedded.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/phi2.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/phi3.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/plamo.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/plamo2.cpp +9 -5
- data/ext/sources/examples/talk-llama/models/plamo3.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/plm.cpp +15 -14
- data/ext/sources/examples/talk-llama/models/qwen.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/qwen2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/qwen2moe.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/qwen2vl.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/qwen3.cpp +12 -9
- data/ext/sources/examples/talk-llama/models/qwen35.cpp +381 -0
- data/ext/sources/examples/talk-llama/models/qwen35moe.cpp +422 -0
- data/ext/sources/examples/talk-llama/models/qwen3moe.cpp +15 -8
- data/ext/sources/examples/talk-llama/models/qwen3next.cpp +84 -432
- data/ext/sources/examples/talk-llama/models/qwen3vl-moe.cpp +9 -18
- data/ext/sources/examples/talk-llama/models/qwen3vl.cpp +8 -17
- data/ext/sources/examples/talk-llama/models/refact.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/rnd1.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/rwkv6-base.cpp +2 -0
- data/ext/sources/examples/talk-llama/models/rwkv7-base.cpp +2 -0
- data/ext/sources/examples/talk-llama/models/seed-oss.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/smallthinker.cpp +4 -4
- data/ext/sources/examples/talk-llama/models/smollm3.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/stablelm.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/starcoder.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/starcoder2.cpp +3 -3
- data/ext/sources/examples/talk-llama/models/step35-iswa.cpp +165 -0
- data/ext/sources/examples/talk-llama/models/t5-dec.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/t5-enc.cpp +2 -2
- data/ext/sources/examples/talk-llama/models/xverse.cpp +3 -3
- data/ext/sources/examples/talk-llama/unicode.cpp +21 -65
- data/ext/sources/ggml/CMakeLists.txt +9 -3
- data/ext/sources/ggml/include/ggml-backend.h +1 -1
- data/ext/sources/ggml/include/ggml-cann.h +1 -1
- data/ext/sources/ggml/include/ggml-cpu.h +5 -0
- data/ext/sources/ggml/include/ggml-openvino.h +37 -0
- data/ext/sources/ggml/include/ggml-opt.h +1 -1
- data/ext/sources/ggml/include/ggml-rpc.h +6 -1
- data/ext/sources/ggml/include/ggml-virtgpu.h +14 -0
- data/ext/sources/ggml/include/ggml.h +56 -9
- data/ext/sources/ggml/src/CMakeLists.txt +3 -0
- data/ext/sources/ggml/src/ggml-alloc.c +4 -9
- data/ext/sources/ggml/src/ggml-backend-dl.cpp +48 -0
- data/ext/sources/ggml/src/ggml-backend-dl.h +45 -0
- data/ext/sources/ggml/src/ggml-backend-reg.cpp +28 -86
- data/ext/sources/ggml/src/ggml-backend.cpp +5 -2
- data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +1 -1
- data/ext/sources/ggml/src/ggml-blas/ggml-blas.cpp +6 -2
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.cpp +1 -1
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.h +1 -1
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.cpp +348 -189
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +40 -85
- data/ext/sources/ggml/src/ggml-cann/common.h +3 -4
- data/ext/sources/ggml/src/ggml-cann/ggml-cann.cpp +44 -62
- data/ext/sources/ggml/src/ggml-common.h +11 -0
- data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +16 -11
- data/ext/sources/ggml/src/ggml-cpu/amx/amx.cpp +42 -19
- data/ext/sources/ggml/src/ggml-cpu/amx/common.h +34 -10
- data/ext/sources/ggml/src/ggml-cpu/amx/mmq.cpp +85 -85
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/quants.c +85 -1
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/repack.cpp +2744 -548
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/quants.c +1653 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/repack.cpp +1391 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/s390/quants.c +8 -10
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/quants.c +9 -9
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/repack.cpp +118 -18
- data/ext/sources/ggml/src/ggml-cpu/arch-fallback.h +107 -26
- data/ext/sources/ggml/src/ggml-cpu/binary-ops.cpp +2 -6
- data/ext/sources/ggml/src/ggml-cpu/common.h +8 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-impl.h +3 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +59 -12
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.cpp +15 -0
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +21 -20
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +965 -252
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +584 -197
- data/ext/sources/ggml/src/ggml-cpu/ops.cpp +903 -188
- data/ext/sources/ggml/src/ggml-cpu/ops.h +1 -0
- data/ext/sources/ggml/src/ggml-cpu/quants.c +40 -0
- data/ext/sources/ggml/src/ggml-cpu/quants.h +3 -0
- data/ext/sources/ggml/src/ggml-cpu/repack.cpp +2890 -679
- data/ext/sources/ggml/src/ggml-cpu/repack.h +119 -8
- data/ext/sources/ggml/src/ggml-cpu/simd-gemm.h +136 -0
- data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +111 -3
- data/ext/sources/ggml/src/ggml-cpu/unary-ops.cpp +1 -1
- data/ext/sources/ggml/src/ggml-cpu/vec.cpp +17 -0
- data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +1 -1
- data/ext/sources/ggml/src/ggml-cuda/argsort.cu +19 -10
- data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +32 -30
- data/ext/sources/ggml/src/ggml-cuda/common.cuh +134 -18
- data/ext/sources/ggml/src/ggml-cuda/convert.cu +41 -27
- data/ext/sources/ggml/src/ggml-cuda/cpy.cu +6 -3
- data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +78 -64
- data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +384 -143
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cuh +36 -22
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec.cuh +3 -3
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +26 -5
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +1 -1
- data/ext/sources/ggml/src/ggml-cuda/fattn.cu +127 -12
- data/ext/sources/ggml/src/ggml-cuda/gated_delta_net.cu +263 -0
- data/ext/sources/ggml/src/ggml-cuda/gated_delta_net.cuh +4 -0
- data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +595 -200
- data/ext/sources/ggml/src/ggml-cuda/mean.cu +9 -8
- data/ext/sources/ggml/src/ggml-cuda/mma.cuh +173 -6
- data/ext/sources/ggml/src/ggml-cuda/mmf.cu +30 -10
- data/ext/sources/ggml/src/ggml-cuda/mmf.cuh +158 -85
- data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +34 -22
- data/ext/sources/ggml/src/ggml-cuda/mmvf.cu +127 -67
- data/ext/sources/ggml/src/ggml-cuda/mmvf.cuh +2 -0
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +157 -65
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cuh +1 -0
- data/ext/sources/ggml/src/ggml-cuda/norm.cu +18 -76
- data/ext/sources/ggml/src/ggml-cuda/pad.cu +13 -10
- data/ext/sources/ggml/src/ggml-cuda/quantize.cu +1 -1
- data/ext/sources/ggml/src/ggml-cuda/reduce_rows.cuh +2 -16
- data/ext/sources/ggml/src/ggml-cuda/rope.cu +233 -133
- data/ext/sources/ggml/src/ggml-cuda/softmax.cu +8 -83
- data/ext/sources/ggml/src/ggml-cuda/solve_tri.cu +1 -1
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +56 -32
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cuh +1 -1
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_32.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_32.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_4.cu +1 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +3 -3
- data/ext/sources/ggml/src/ggml-cuda/top-k.cu +0 -1
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cu +199 -135
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cuh +20 -14
- data/ext/sources/ggml/src/ggml-cuda/unary.cu +55 -0
- data/ext/sources/ggml/src/ggml-cuda/unary.cuh +2 -0
- data/ext/sources/ggml/src/ggml-cuda/vecdotq.cuh +31 -17
- data/ext/sources/ggml/src/ggml-cuda/vendors/hip.h +10 -0
- data/ext/sources/ggml/src/ggml-hexagon/CMakeLists.txt +82 -45
- data/ext/sources/ggml/src/ggml-hexagon/ggml-hexagon.cpp +334 -160
- data/ext/sources/ggml/src/ggml-hexagon/htp/CMakeLists.txt +7 -5
- data/ext/sources/ggml/src/ggml-hexagon/htp/act-ops.c +328 -197
- data/ext/sources/ggml/src/ggml-hexagon/htp/argsort-ops.c +281 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/binary-ops.c +765 -234
- data/ext/sources/ggml/src/ggml-hexagon/htp/cpy-ops.c +252 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/flash-attn-ops.c +412 -265
- data/ext/sources/ggml/src/ggml-hexagon/htp/get-rows-ops.c +23 -23
- data/ext/sources/ggml/src/ggml-hexagon/htp/{htp-dma.c → hex-dma.c} +1 -1
- data/ext/sources/ggml/src/ggml-hexagon/htp/{htp-dma.h → hex-dma.h} +28 -3
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-dump.h +77 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-fastdiv.h +37 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hex-utils.h +51 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-ctx.h +1 -1
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-msg.h +27 -37
- data/ext/sources/ggml/src/ggml-hexagon/htp/htp-ops.h +6 -35
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-arith.h +443 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-base.h +240 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-copy.h +245 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-div.h +251 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-dump.h +129 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-exp.h +215 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-floor.h +100 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-inverse.h +210 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-reduce.h +296 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-scale.h +133 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sigmoid.h +141 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sqrt.h +126 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-types.h +36 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-utils.h +20 -1347
- data/ext/sources/ggml/src/ggml-hexagon/htp/main.c +211 -13
- data/ext/sources/ggml/src/ggml-hexagon/htp/matmul-ops.c +1119 -952
- data/ext/sources/ggml/src/ggml-hexagon/htp/rope-ops.c +254 -244
- data/ext/sources/ggml/src/ggml-hexagon/htp/set-rows-ops.c +36 -36
- data/ext/sources/ggml/src/ggml-hexagon/htp/softmax-ops.c +155 -138
- data/ext/sources/ggml/src/ggml-hexagon/htp/ssm-conv.c +339 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/sum-rows-ops.c +128 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp/unary-ops.c +209 -114
- data/ext/sources/ggml/src/ggml-hexagon/htp/worker-pool.c +1 -5
- data/ext/sources/ggml/src/ggml-hexagon/htp-drv.cpp +418 -0
- data/ext/sources/ggml/src/ggml-hexagon/htp-drv.h +121 -0
- data/ext/sources/ggml/src/ggml-hexagon/libdl.h +79 -0
- data/ext/sources/ggml/src/ggml-hexagon/libggml-htp.inf +38 -0
- data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +6 -0
- data/ext/sources/ggml/src/ggml-impl.h +62 -0
- data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +10 -10
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-common.cpp +13 -2
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.h +8 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.m +147 -17
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.cpp +274 -73
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.h +22 -4
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.m +102 -36
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +174 -23
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.cpp +580 -280
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.h +5 -4
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.cpp +320 -107
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.metal +1068 -825
- data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +19 -1
- data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +3108 -636
- data/ext/sources/ggml/src/ggml-opencl/kernels/concat.cl +41 -99
- data/ext/sources/ggml/src/ggml-opencl/kernels/cpy.cl +45 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cumsum.cl +139 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +204 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/diag.cl +27 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/exp.cl +125 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/expm1.cl +87 -56
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemm_noshuffle_q4_1_f32.cl +132 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_general_q8_0_f32.cl +195 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_q4_1_f32.cl +283 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/l2_norm.cl +71 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mean.cl +114 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q4_0_f32_l4_lm.cl +163 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q4_1_f32_l4_lm.cl +165 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q6_k_f32_l4_lm.cl +158 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_q8_0_f32_8x4.cl +129 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_1_f32.cl +219 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_1_f32_flat.cl +229 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_k_f32.cl +180 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/{mul_mv_q6_k.cl → mul_mv_q6_k_f32.cl} +4 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q6_k_f32_flat.cl +194 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/neg.cl +125 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/repeat.cl +31 -32
- data/ext/sources/ggml/src/ggml-opencl/kernels/scale.cl +14 -4
- data/ext/sources/ggml/src/ggml-opencl/kernels/softplus.cl +88 -60
- data/ext/sources/ggml/src/ggml-opencl/kernels/solve_tri.cl +51 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sum_rows.cl +114 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/tanh.cl +94 -48
- data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +26 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/tri.cl +32 -0
- data/ext/sources/ggml/src/ggml-openvino/.clang-format +154 -0
- data/ext/sources/ggml/src/ggml-openvino/CMakeLists.txt +22 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-decoder.cpp +975 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-decoder.h +294 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino-extra.cpp +373 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino-extra.h +182 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-openvino.cpp +1110 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-quants.cpp +884 -0
- data/ext/sources/ggml/src/ggml-openvino/ggml-quants.h +153 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/decoder.h +74 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/frontend.cpp +27 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/frontend.h +23 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/input_model.cpp +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/input_model.h +29 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/node_context.h +112 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/cont.cpp +48 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/cpy.cpp +21 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/flash_attn_ext.cpp +90 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/get_rows.cpp +69 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/glu_geglu.cpp +61 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/glu_swiglu.cpp +62 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/mulmat.cpp +90 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/permute.cpp +102 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/reshape.cpp +83 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/rms_norm.cpp +46 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/rope.cpp +123 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/scale.cpp +41 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/set_rows.cpp +76 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/softmax.cpp +89 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/transpose.cpp +23 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/unary_silu.cpp +27 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op/view.cpp +53 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op_table.cpp +46 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/op_table.h +39 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/eliminate_zp.cpp +123 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/eliminate_zp.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/fuse_to_sdpa.cpp +60 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/fuse_to_sdpa.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/mark_decompression_convert_constant_folding.h +29 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/squeeze_matmul.cpp +58 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/pass/squeeze_matmul.h +17 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/translate_session.cpp +293 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/translate_session.h +28 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/utils.cpp +226 -0
- data/ext/sources/ggml/src/ggml-openvino/openvino/utils.h +85 -0
- data/ext/sources/ggml/src/ggml-openvino/utils.cpp +823 -0
- data/ext/sources/ggml/src/ggml-openvino/utils.h +123 -0
- data/ext/sources/ggml/src/ggml-quants.c +96 -5
- data/ext/sources/ggml/src/ggml-quants.h +3 -0
- data/ext/sources/ggml/src/ggml-sycl/CMakeLists.txt +15 -88
- data/ext/sources/ggml/src/ggml-sycl/add-id.cpp +5 -1
- data/ext/sources/ggml/src/ggml-sycl/backend.hpp +1 -0
- data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +21 -20
- data/ext/sources/ggml/src/ggml-sycl/common.hpp +315 -10
- data/ext/sources/ggml/src/ggml-sycl/convert.cpp +69 -1
- data/ext/sources/ggml/src/ggml-sycl/convert.hpp +22 -1
- data/ext/sources/ggml/src/ggml-sycl/count-equal.cpp +1 -1
- data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +791 -47
- data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +78 -68
- data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +2 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-common.hpp +1179 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-tile.cpp +55 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-tile.hpp +1338 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn-vec.hpp +667 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn.cpp +225 -0
- data/ext/sources/ggml/src/ggml-sycl/fattn.hpp +22 -0
- data/ext/sources/ggml/src/ggml-sycl/gated_delta_net.cpp +309 -0
- data/ext/sources/ggml/src/ggml-sycl/gated_delta_net.hpp +8 -0
- data/ext/sources/ggml/src/ggml-sycl/ggml-sycl.cpp +316 -51
- data/ext/sources/ggml/src/ggml-sycl/norm.cpp +65 -66
- data/ext/sources/ggml/src/ggml-sycl/outprod.cpp +3 -3
- data/ext/sources/ggml/src/ggml-sycl/presets.hpp +3 -0
- data/ext/sources/ggml/src/ggml-sycl/quants.hpp +1 -1
- data/ext/sources/ggml/src/ggml-sycl/rope.cpp +450 -287
- data/ext/sources/ggml/src/ggml-sycl/rope.hpp +6 -0
- data/ext/sources/ggml/src/ggml-sycl/softmax.cpp +6 -6
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq112-dv112.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq128-dv128.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq256-dv256.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq40-dv40.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq576-dv512.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq64-dv64.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq72-dv72.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq80-dv80.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-tile-instance-dkq96-dv96.cpp +5 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-f16-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q4_1-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q5_1-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-f16.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q4_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q4_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q5_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q5_1.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/template-instances/fattn-vec-instance-q8_0-q8_0.cpp +7 -0
- data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +13 -0
- data/ext/sources/ggml/src/ggml-sycl/wkv.cpp +1 -1
- data/ext/sources/ggml/src/ggml-virtgpu/CMakeLists.txt +70 -0
- data/ext/sources/ggml/src/ggml-virtgpu/apir_cs_ggml-rpc-front.cpp +87 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/CMakeLists.txt +21 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/apir_cs_ggml-rpc-back.cpp +115 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-convert.h +13 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-backend.cpp +102 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-buffer-type.cpp +105 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-buffer.cpp +179 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched-device.cpp +148 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.cpp +51 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.gen.h +73 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-dispatched.h +27 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend-virgl-apir.h +32 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/backend.cpp +144 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/api_remoting.h +95 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_backend.gen.h +94 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_backend.h +50 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs.h +378 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs_ggml.h +232 -0
- data/ext/sources/ggml/src/ggml-virtgpu/backend/shared/apir_cs_rpc.h +58 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-buffer-type.cpp +81 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-buffer.cpp +119 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-device.cpp +158 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend-reg.cpp +213 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-backend.cpp +69 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggml-remoting.h +71 -0
- data/ext/sources/ggml/src/ggml-virtgpu/ggmlremoting_functions.yaml +166 -0
- data/ext/sources/ggml/src/ggml-virtgpu/include/apir_hw.h +9 -0
- data/ext/sources/ggml/src/ggml-virtgpu/regenerate_remoting.py +333 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-apir.h +15 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-backend.cpp +58 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-buffer-type.cpp +110 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-buffer.cpp +173 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-device.cpp +192 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward-impl.h +36 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-forward.gen.h +53 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-shm.cpp +98 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-shm.h +23 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-utils.cpp +179 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu-utils.h +86 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu.cpp +544 -0
- data/ext/sources/ggml/src/ggml-virtgpu/virtgpu.h +117 -0
- data/ext/sources/ggml/src/ggml-vulkan/CMakeLists.txt +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/ggml-vulkan.cpp +1250 -465
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +16 -8
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/elu.comp +27 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +374 -170
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.glsl +66 -22
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +389 -201
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +106 -58
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_mask_opt.comp +162 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +9 -8
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gated_delta_net.comp +128 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +12 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.glsl +20 -17
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +11 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +8 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_id_funcs.glsl +3 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +5 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.glsl +3 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +2 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_funcs.glsl +36 -63
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_params.glsl +10 -5
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_vision.comp +7 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sgn.comp +21 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/ssm_conv.comp +16 -10
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +55 -35
- data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu-shader-lib.hpp +1314 -109
- data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu.cpp +1660 -1371
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argmax.wgsl +72 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argsort.wgsl +106 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/argsort_merge.wgsl +134 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/binary.wgsl +141 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/common_decls.tmpl +65 -72
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/concat.wgsl +75 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cpy.tmpl.wgsl +6 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cumsum.wgsl +66 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/embed_wgsl.py +40 -5
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/flash_attn.wgsl +105 -60
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{get_rows.tmpl.wgsl → get_rows.wgsl} +53 -259
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{mul_mat.tmpl.wgsl → mul_mat.wgsl} +68 -257
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_decls.tmpl +692 -23
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{mul_mat_reg_tile.tmpl.wgsl → mul_mat_reg_tile.wgsl} +28 -128
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{mul_mat_subgroup_matrix.tmpl.wgsl → mul_mat_subgroup_matrix.wgsl} +31 -137
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_vec.wgsl +480 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/pad.wgsl +86 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/repeat.wgsl +67 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/{scale.tmpl.wgsl → scale.wgsl} +9 -36
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/set_rows.wgsl +40 -12
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/sum_rows.wgsl +55 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/unary.wgsl +193 -0
- data/ext/sources/ggml/src/ggml-zdnn/ggml-zdnn.cpp +6 -1
- data/ext/sources/ggml/src/ggml-zendnn/CMakeLists.txt +31 -32
- data/ext/sources/ggml/src/ggml-zendnn/ggml-zendnn.cpp +9 -6
- data/ext/sources/ggml/src/ggml.c +167 -33
- data/ext/sources/ggml/src/gguf.cpp +229 -44
- data/ext/sources/src/whisper.cpp +6 -28
- data/sig/whisper.rbs +43 -2
- data/test/test_context_params.rb +82 -0
- data/test/test_token.rb +11 -0
- data/test/test_vad_context.rb +58 -8
- data/test/test_whisper.rb +20 -0
- data/whispercpp.gemspec +1 -1
- metadata +240 -28
- data/ext/sources/ggml/cmake/BuildTypes.cmake +0 -54
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm-ppc.h +0 -333
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-exp.c +0 -94
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-inverse.c +0 -72
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-sigmoid.c +0 -49
- data/ext/sources/ggml/src/ggml-hexagon/htp/hvx-utils.c +0 -1020
- data/ext/sources/ggml/src/ggml-hexagon/htp/ops-utils.h +0 -149
- data/ext/sources/ggml/src/ggml-hexagon/htp-utils.c +0 -454
- data/ext/sources/ggml/src/ggml-hexagon/htp-utils.h +0 -221
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/bin_op.tmpl.wgsl +0 -188
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/binary_head.tmpl +0 -45
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat_vec.tmpl.wgsl +0 -267
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/set_rows.tmpl.wgsl +0 -112
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/unary_op.wgsl +0 -483
data/ext/sources/examples/talk-llama/models/jais2.cpp
@@ -0,0 +1,123 @@
+#include "models.h"
+
+// JAIS-2 model graph builder
+// Uses: LayerNorm (not RMSNorm), relu2 activation, separate Q/K/V, RoPE embeddings
+llm_build_jais2::llm_build_jais2(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
+    const int64_t n_embd_head = hparams.n_embd_head_v();
+
+    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k());
+    GGML_ASSERT(n_embd_head == n_rot);
+
+    ggml_tensor * cur;
+    ggml_tensor * inpL;
+
+    inpL = build_inp_embd(model.tok_embd);
+
+    // inp_pos - contains the positions
+    ggml_tensor * inp_pos = build_inp_pos();
+
+    // KV input for attention
+    auto * inp_attn = build_attn_inp_kv();
+
+    ggml_tensor * inp_out_ids = build_inp_out_ids();
+
+    for (int il = 0; il < n_layer; ++il) {
+        // Pre-attention LayerNorm
+        cur = build_norm(inpL,
+                model.layers[il].attn_norm,
+                model.layers[il].attn_norm_b,
+                LLM_NORM, il);
+        cb(cur, "attn_norm", il);
+
+        // Self-attention with separate Q, K, V projections
+        {
+            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
+            cb(Qcur, "Qcur", il);
+            Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+            cb(Qcur, "Qcur_bias", il);
+
+            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
+            cb(Kcur, "Kcur", il);
+            Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+            cb(Kcur, "Kcur_bias", il);
+
+            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
+            cb(Vcur, "Vcur", il);
+            Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+            cb(Vcur, "Vcur_bias", il);
+
+            // Reshape for attention
+            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
+
+            // Apply RoPE
+            Qcur = ggml_rope_ext(
+                    ctx0, Qcur, inp_pos, nullptr,
+                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                    );
+
+            Kcur = ggml_rope_ext(
+                    ctx0, Kcur, inp_pos, nullptr,
+                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                    );
+
+            cb(Qcur, "Qcur_rope", il);
+            cb(Kcur, "Kcur_rope", il);
+
+            cur = build_attn(inp_attn,
+                    model.layers[il].wo, model.layers[il].bo,
+                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
+        }
+
+        if (il == n_layer - 1 && inp_out_ids) {
+            cur  = ggml_get_rows(ctx0, cur,  inp_out_ids);
+            inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
+        }
+
+        // Residual connection
+        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
+        cb(ffn_inp, "ffn_inp", il);
+
+        // Pre-FFN LayerNorm
+        cur = build_norm(ffn_inp,
+                model.layers[il].ffn_norm,
+                model.layers[il].ffn_norm_b,
+                LLM_NORM, il);
+        cb(cur, "ffn_norm", il);
+
+        // FFN with relu2 activation (ReLU squared) - no gate projection
+        // up -> relu2 -> down
+        cur = build_ffn(cur,
+                model.layers[il].ffn_up,   model.layers[il].ffn_up_b,   NULL,
+                NULL,                      NULL,                        NULL, // no gate
+                model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
+                NULL,
+                LLM_FFN_RELU_SQR, LLM_FFN_SEQ, il);
+        cb(cur, "ffn_out", il);
+
+        // Residual connection
+        inpL = ggml_add(ctx0, cur, ffn_inp);
+        inpL = build_cvec(inpL, il);
+        cb(inpL, "l_out", il);
+    }
+
+    // Final LayerNorm
+    cur = build_norm(inpL,
+            model.output_norm,
+            model.output_norm_b,
+            LLM_NORM, -1);
+    cb(cur, "result_norm", -1);
+
+    res->t_embd = cur;
+
+    // Output projection
+    cur = build_lora_mm(model.output, cur);
+    cb(cur, "result_output", -1);
+
+    res->t_logits = cur;
+
+    ggml_build_forward_expand(gf, cur);
+}
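A note on the new builder above: LLM_FFN_RELU_SQR selects the relu2 (ReLU-squared) activation between the up and down projections, with no gate projection. A minimal sketch of that activation on a plain float buffer (the function name and loop are illustrative, not the ggml implementation):

#include <stddef.h>

// relu2(x) = max(0, x)^2, applied element-wise (illustrative sketch)
static void relu2_inplace(float * x, size_t n) {
    for (size_t i = 0; i < n; ++i) {
        const float r = x[i] > 0.0f ? x[i] : 0.0f; // ReLU
        x[i] = r * r;                              // then square
    }
}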
data/ext/sources/examples/talk-llama/models/jamba.cpp
@@ -1,7 +1,7 @@
 #include "models.h"
 
-llm_build_jamba::llm_build_jamba(const llama_model & model, const llm_graph_params & params) :
-    const int64_t n_embd_head = hparams.n_embd_head_v;
+llm_build_jamba::llm_build_jamba(const llama_model & model, const llm_graph_params & params) : llm_build_mamba_base(params) {
+    const int64_t n_embd_head = hparams.n_embd_head_v();
 
     ggml_tensor * cur;
     ggml_tensor * inpL;
@@ -76,7 +76,7 @@ llm_build_jamba::llm_build_jamba(const llama_model & model, const llm_graph_para
                     nullptr,
                     n_expert, n_expert_used,
                     LLM_FFN_SILU, false,
-
+                    hparams.expert_weights_scale,
                     LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                     il);
             cb(cur, "ffn_moe_out", il);
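The second jamba hunk passes hparams.expert_weights_scale into the MoE FFN call with softmax gating. Conceptually, the softmax-normalized routing weights of the selected experts are multiplied by this per-model scale before the expert outputs are mixed; a hedged sketch of that step (names illustrative, not the build_moe_ffn internals):

#include <math.h>

// Sketch: numerically stable softmax over the selected experts' logits,
// then apply the model's expert_weights_scale (illustrative only).
static void route_experts(float * w, int n_expert_used, float expert_weights_scale) {
    float max_w = w[0];
    for (int i = 1; i < n_expert_used; ++i) {
        if (w[i] > max_w) max_w = w[i];
    }
    float sum = 0.0f;
    for (int i = 0; i < n_expert_used; ++i) {
        w[i] = expf(w[i] - max_w);
        sum += w[i];
    }
    for (int i = 0; i < n_expert_used; ++i) {
        w[i] = w[i] / sum * expert_weights_scale;
    }
}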
@@ -0,0 +1,381 @@
|
|
|
1
|
+
#include "models.h"
|
|
2
|
+
|
|
3
|
+
#include "llama-memory-recurrent.h"
|
|
4
|
+
|
|
5
|
+
// Causal Conv1d function for Q,K,V
|
|
6
|
+
// When qkv is 0, it is Q, 1 is K, 2 is V
|
|
7
|
+
static ggml_tensor * causal_conv1d(ggml_cgraph * gf, ggml_context * ctx0, ggml_tensor * conv_states_all, ggml_tensor * conv_state_all, int64_t qkv, ggml_tensor * x, ggml_tensor * proj_w, ggml_tensor * conv_w, int64_t d_conv, int64_t head_dim, int64_t n_head, int64_t n_seq_tokens, int64_t n_seqs, int64_t n_tokens, int64_t kv_head) {
|
|
8
|
+
const int64_t d_inner = head_dim * n_head;
|
|
9
|
+
const int64_t conv_state_size = (d_conv - 1) * d_inner;
|
|
10
|
+
const int64_t n_embd_r_total = 3 * conv_state_size; // Q + K + V
|
|
11
|
+
|
|
12
|
+
// conv_state_all is [n_embd_r_total, n_seqs], split into Q, K, V
|
|
13
|
+
// Each conv state is [(d_conv-1) * d_inner] per sequence, need to reshape to [d_conv-1, d_inner, n_seqs]
|
|
14
|
+
// Memory layout: for each seq, Q state is first conv_state_size elements, then K, then V
|
|
15
|
+
// conv_state_all has stride: nb[0] = element_size, nb[1] = n_embd_r_total * element_size
|
|
16
|
+
// View Q conv state: offset 0, size conv_state_size per seq
|
|
17
|
+
// conv_state_all is [n_embd_r_total, n_seqs] with memory layout:
|
|
18
|
+
// state[i + seq * n_embd_r_total] where i = conv_step + channel * (d_conv-1) + {0, conv_state_size, 2*conv_state_size} for Q/K/V
|
|
19
|
+
// We want [d_conv-1, d_inner, n_seqs] view:
|
|
20
|
+
// nb1 = (d_conv-1) * element_size (stride between channels)
|
|
21
|
+
// nb2 = n_embd_r_total * element_size (stride between seqs)
|
|
22
|
+
ggml_tensor * conv_state_x = ggml_view_3d(ctx0, conv_state_all, d_conv - 1, d_inner, n_seqs,
|
|
23
|
+
(d_conv - 1) * ggml_element_size(conv_state_all), // nb1: stride between channels
|
|
24
|
+
n_embd_r_total * ggml_element_size(conv_state_all), // nb2: stride between seqs
|
|
25
|
+
qkv * conv_state_size * ggml_element_size(conv_state_all));
|
|
26
|
+
|
|
27
|
+
// Causal Conv1d function for Q,K,V
|
|
28
|
+
// When qkv is 0, it is Q, 1 is K, 2 is V
|
|
29
|
+
// Step 1: Q, K, V projections -> [d_inner, n_tokens]
|
|
30
|
+
ggml_tensor * x_proj = ggml_mul_mat(ctx0, proj_w, x);
|
|
31
|
+
|
|
32
|
+
// Reshape input: {d_inner, n_tokens} -> {d_inner, n_seq_tokens, n_seqs}
|
|
33
|
+
ggml_tensor * x_3d = ggml_reshape_3d(ctx0, x_proj, d_inner, n_seq_tokens, n_seqs);
|
|
34
|
+
|
|
35
|
+
// Concat Q conv state and current input: {d_conv-1 + n_seq_tokens, d_inner, n_seqs}
|
|
36
|
+
ggml_tensor * conv_x = ggml_concat(ctx0, conv_state_x, ggml_transpose(ctx0, x_3d), 0);
|
|
37
|
+
|
|
38
|
+
// Save last (d_conv-1) columns back to Q conv state
|
|
39
|
+
ggml_tensor * last_conv_x = ggml_view_3d(ctx0, conv_x, d_conv - 1, d_inner, n_seqs,
|
|
40
|
+
conv_x->nb[1], conv_x->nb[2], n_seq_tokens * conv_x->nb[0]);
|
|
41
|
+
ggml_build_forward_expand(gf,
|
|
42
|
+
ggml_cpy(ctx0, last_conv_x,
|
|
43
|
+
ggml_view_3d(ctx0, conv_states_all,
|
|
44
|
+
d_conv - 1, d_inner, n_seqs,
|
|
45
|
+
(d_conv - 1) * ggml_element_size(conv_states_all), // nb1: contiguous within one channel's conv taps
|
|
46
|
+
n_embd_r_total * ggml_element_size(conv_states_all), // nb2: stride between sequences (skip over K,V states)
|
|
47
|
+
(kv_head * n_embd_r_total + qkv * conv_state_size) * ggml_element_size(conv_states_all)))); // offset to first seq's Q/K/V state
|
|
48
|
+
// Reshape conv weight: GGUF [d_conv, 1, d_inner, 1] -> ggml_ssm_conv expects [d_conv, d_inner]
|
|
49
|
+
// GGUF stores as [d_conv, 1, d_inner, 1] with memory layout w[conv_step + channel * d_conv]
|
|
50
|
+
// vLLM stores as [d_inner, d_conv] with memory layout w[channel * d_conv + conv_step]
|
|
51
|
+
// ggml_ssm_conv computes: c[conv_step + channel * d_conv]
|
|
52
|
+
// GGUF layout: [d_conv, 1, d_inner] or [d_conv, 1, d_inner, 1] -> reshape to [d_conv, d_inner]
|
|
53
|
+
// Reshape conv weight from [d_conv, 1, d_inner, 1] to [d_conv, d_inner] for ggml_ssm_conv
|
|
54
|
+
ggml_tensor * conv_weight = ggml_reshape_2d(ctx0, conv_w, d_conv, d_inner);
|
|
55
|
+
|
|
56
|
+
// Apply conv1d
|
|
57
|
+
// ggml_ssm_conv output: {d_inner, n_seq_tokens, n_seqs}
|
|
58
|
+
ggml_tensor * Xcur = ggml_ssm_conv(ctx0, conv_x, conv_weight);
|
|
59
|
+
// Reshape to 2D for bias add: {d_inner, n_tokens}
|
|
60
|
+
Xcur = ggml_reshape_2d(ctx0, Xcur, d_inner, n_tokens);
|
|
61
|
+
Xcur = ggml_silu(ctx0, Xcur);
|
|
62
|
+
|
|
63
|
+
return ggml_reshape_4d(ctx0, Xcur, head_dim, n_head, n_seq_tokens, n_seqs);
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
llm_build_kimi_linear::llm_build_kimi_linear(const llama_model & model, const llm_graph_params & params) :
|
|
67
|
+
llm_build_delta_net_base(params), model(model) {
|
|
68
|
+
ggml_tensor * cur;
|
|
69
|
+
ggml_tensor * inpL;
|
|
70
|
+
|
|
71
|
+
inpL = build_inp_embd(model.tok_embd);
|
|
72
|
+
cb(inpL, "model.embed_tokens", -1);
|
|
73
|
+
|
|
74
|
+
// Note: Kimi MLA does NOT use RoPE (rotary_emb=None in vLLM)
|
|
75
|
+
// So we don't need inp_pos
|
|
76
|
+
|
|
77
|
+
auto * inp_kv = !hparams.is_mla() ? build_inp_mem_hybrid() : nullptr;
|
|
78
|
+
auto * inp_k = hparams.is_mla() ? build_inp_mem_hybrid_k() : nullptr;
|
|
79
|
+
auto * inp_rs = hparams.is_mla() ? inp_k->get_recr() : inp_kv->get_recr();
|
|
80
|
+
auto * inp_attn_kv = !hparams.is_mla() ? inp_kv->get_attn() : nullptr;
|
|
81
|
+
auto * inp_attn_k = hparams.is_mla() ? inp_k->get_attn() : nullptr;
|
|
82
|
+
|
|
83
|
+
// Output ids for selecting which tokens to output
|
|
84
|
+
ggml_tensor * inp_out_ids = build_inp_out_ids();
|
|
85
|
+
|
|
86
|
+
// Kimi dimension constants
|
|
87
|
+
const int64_t n_head = hparams.n_head();
|
|
88
|
+
const int64_t head_dim = hparams.n_embd_head_kda;
|
|
89
|
+
const int64_t d_conv = hparams.ssm_d_conv;
|
|
90
|
+
const int64_t d_inner = n_head * head_dim; // 32 * 128 = 4096
|
|
91
|
+
const int64_t n_seqs = ubatch.n_seqs;
|
|
92
|
+
const int64_t n_seq_tokens = ubatch.n_seq_tokens;
|
|
93
|
+
|
|
94
|
+
// Verify batch consistency for recurrent layers
|
|
95
|
+
GGML_ASSERT(n_seqs != 0);
|
|
96
|
+
GGML_ASSERT(ubatch.equal_seqs());
|
|
97
|
+
GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs);
|
|
98
|
+
|
|
99
|
+
// MLA params
|
|
100
|
+
const int64_t n_embd_head_k_mla = hparams.n_embd_head_k_mla();
|
|
101
|
+
const int64_t n_embd_head_v_mla = hparams.n_embd_head_v_mla();
|
|
102
|
+
const int64_t kv_lora_rank = hparams.n_lora_kv;
|
|
103
|
+
// qk_rope_head_dim = 64 (from Kimi config) which is hparams.n_rot
|
|
104
|
+
// Confirmed from tensor shape: wkv_a_mqa [2304, 576] = [n_embd, kv_lora_rank + qk_rope_head_dim]
|
|
105
|
+
const int64_t n_embd_head_qk_rope = hparams.n_rot(); // config.qk_rope_head_dim
|
|
106
|
+
const int64_t n_embd_head_qk_nope = n_embd_head_k_mla - n_embd_head_qk_rope; // 192 - 64 = 128
|
|
107
|
+
// Attention scale for MLA
|
|
108
|
+
const float kq_scale_mla = 1.0f / sqrtf((float)n_embd_head_k_mla);
|
|
109
|
+
|
|
110
|
+
for (int il = 0; il < n_layer; ++il) {
|
|
111
|
+
const auto & layer = model.layers[il];
|
|
112
|
+
ggml_tensor * inpSA = inpL;
|
|
113
|
+
|
|
114
|
+
// Attention Norm
|
|
115
|
+
cur = build_norm(inpL, layer.attn_norm, NULL, LLM_NORM_RMS, il);
|
|
116
|
+
cb(cur, "attn_norm", il);
|
|
117
|
+
|
|
118
|
+
ggml_build_forward_expand(gf, cur);
|
|
119
|
+
|
|
120
|
+
if (hparams.is_recurrent(il)) {
|
|
121
|
+
// === KDA Layer (Kimi Delta Attention) with Recurrent State ===
|
|
122
|
+
// Reference: vLLM kda.py
|
|
123
|
+
const auto * mctx_cur = inp_rs->mctx;
|
|
124
|
+
const auto kv_head = mctx_cur->get_head();
|
|
125
|
+
|
|
126
|
+
// Get conv states from r_l tensor (Q, K, V each have separate state)
|
|
127
|
+
ggml_tensor * conv_states_all = mctx_cur->get_r_l(il);
|
|
128
|
+
cb(conv_states_all, "conv_states_all", il);
|
|
129
|
+
ggml_tensor * conv_state_all = build_rs(inp_rs, conv_states_all, hparams.n_embd_r(), n_seqs);
|
|
130
|
+
ggml_tensor * Qcur = causal_conv1d(gf, ctx0, conv_states_all, conv_state_all, 0, cur, layer.wq, layer.ssm_q_conv, d_conv, head_dim, n_head, n_seq_tokens, n_seqs, n_tokens, kv_head);
|
|
131
|
+
ggml_tensor * Kcur = causal_conv1d(gf, ctx0, conv_states_all, conv_state_all, 1, cur, layer.wk, layer.ssm_k_conv, d_conv, head_dim, n_head, n_seq_tokens, n_seqs, n_tokens, kv_head);
|
|
132
|
+
ggml_tensor * Vcur = causal_conv1d(gf, ctx0, conv_states_all, conv_state_all, 2, cur, layer.wv, layer.ssm_v_conv, d_conv, head_dim, n_head, n_seq_tokens, n_seqs, n_tokens, kv_head);
|
|
133
|
+
|
|
134
|
+
// g1 = -exp(A_log) * softplus(f_b(f_a(x)) + dt_bias)
|
|
135
|
+
ggml_tensor * f_a = ggml_mul_mat(ctx0, layer.ssm_f_a, cur);
|
|
136
|
+
ggml_tensor * g1 = ggml_mul_mat(ctx0, layer.ssm_f_b, f_a);
|
|
137
|
+
cb(g1, "g1 f_b(f_a(cur))", il);
|
|
138
|
+
g1 = ggml_add(ctx0, g1, layer.ssm_dt_b);
|
|
139
|
+
g1 = ggml_softplus(ctx0, g1);
|
|
140
|
+
g1 = ggml_reshape_3d(ctx0, g1, head_dim, n_head, n_tokens);
|
|
141
|
+
|
|
142
|
+
// A_log shape is [1, n_head] or [1, n_head, 1, 1], need to broadcast to [head_dim, n_head, n_tokens]. No need to -exp(a_log) because it was done in convert_hf_to_gguf.py
|
|
143
|
+
// Reshape to [1, n_head, 1] for broadcasting with g1 [head_dim, n_head, n_tokens]
|
|
144
|
+
ggml_tensor * A = ggml_reshape_3d(ctx0, layer.ssm_a, 1, n_head, 1);
|
|
145
|
+
g1 = ggml_mul(ctx0, g1, A);
|
|
146
|
+
cb(g1, "kda_g1", il);
|
|
147
|
+
|
|
148
|
+
g1 = ggml_reshape_4d(ctx0, g1, head_dim, n_head, n_seq_tokens, n_seqs);
|
|
149
|
+
|
|
150
|
+
// Compute beta (mixing coefficient)
|
|
151
|
+
ggml_tensor * beta = ggml_mul_mat(ctx0, layer.ssm_beta, cur);
|
|
152
|
+
beta = ggml_reshape_4d(ctx0, beta, 1, n_head, n_seq_tokens, n_seqs);
|
|
153
|
+
cb(beta, "kda_beta", il);
|
|
154
|
+
|
|
155
|
+
beta = ggml_sigmoid(ctx0, beta);
|
|
156
|
+
|
|
157
|
+
                // Reshape for KDA recurrence
                // {n_embd, n_tokens} -> {n_embd, n_seq_tokens, n_seqs}
                cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], n_seq_tokens, n_seqs);

                // Get the SSM state and compute the KDA recurrence (see build_delta_net below)
                ggml_tensor * ssm_states_all = mctx_cur->get_s_l(il);
                ggml_tensor * state = build_rs(inp_rs, ssm_states_all, hparams.n_embd_s(), n_seqs);
                state = ggml_reshape_4d(ctx0, state, head_dim, head_dim, n_head, n_seqs);

                const float eps_norm = hparams.f_norm_rms_eps;

                Qcur = ggml_l2_norm(ctx0, Qcur, eps_norm);
                Kcur = ggml_l2_norm(ctx0, Kcur, eps_norm);

                // build_delta_net chooses between build_delta_net_chunking and
                // build_delta_net_recurrent based on n_tokens
                auto attn_out = build_delta_net(Qcur, Kcur, Vcur, g1, beta, state, il);

                ggml_tensor * output    = ggml_cont(ctx0, attn_out.first);
                ggml_tensor * new_state = attn_out.second;
                cb(output, "attn_output", il);
                cb(new_state, "new_state", il);

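                // Contract implied by the code below (a sketch, not a spec): build_delta_net
                // returns an {output, state} pair, with output {head_dim, n_head, n_tokens}
                // and the updated state {head_dim, head_dim, n_head, n_seqs}.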
                // Update the recurrent states
                ggml_build_forward_expand(gf,
                        ggml_cpy(ctx0, new_state,
                                ggml_view_1d(ctx0, ssm_states_all, hparams.n_embd_s() * n_seqs,
                                        kv_head * hparams.n_embd_s() * ggml_element_size(ssm_states_all))));

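                // The 1-D view targets this batch's state slab: states sit contiguously per
                // sequence slot, so the byte offset is kv_head * n_embd_s() * element_size
                // and the copy writes n_embd_s() * n_seqs elements starting there.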
                // Output gating g2 = g_b(g_a(x))
                ggml_tensor * cur_2d = ggml_reshape_2d(ctx0, cur, cur->ne[0], n_seq_tokens * n_seqs);
                ggml_tensor * g_a = ggml_mul_mat(ctx0, layer.ssm_g_a, cur_2d);
                ggml_tensor * g2  = ggml_mul_mat(ctx0, layer.ssm_g_b, g_a);
                cb(g2, "g2 g_b(g_a(cur_2d))", il);
                g2 = ggml_reshape_3d(ctx0, g2, head_dim, n_head, n_seq_tokens * n_seqs);

                // Apply o_norm with sigmoid gating
                // Note: the Kimi model uses sigmoid gating, not SiLU (despite the
                // FusedRMSNormGated default being swish)
                // Formula: output = RMSNorm(x) * sigmoid(g)
                ggml_tensor * attn_out_final = ggml_reshape_3d(ctx0, output, head_dim, n_head, n_seq_tokens * n_seqs);
                ggml_tensor * normed = build_norm(attn_out_final, layer.ssm_o_norm, nullptr, LLM_NORM_RMS, il);
                cb(normed, "kda_normed", il);
                ggml_tensor * gate  = ggml_sigmoid(ctx0, g2);
                ggml_tensor * gated = ggml_mul(ctx0, normed, gate);

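                // End-to-end, the KDA output path below is (sketch):
                //   cur = wo * flatten(RMSNorm(attn_output) * sigmoid(g_b(g_a(x))))
                // i.e. a gated RMSNorm followed by the usual projection back to n_embd.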
                // Output projection
                gated = ggml_cont_2d(ctx0, gated, d_inner, n_tokens);
                cur = ggml_mul_mat(ctx0, layer.wo, gated);
                cb(cur, "kda_out", il);

            } else {
                // === MLA Layer (Multi-head Latent Attention) ===
                // Reference: vLLM mla.py
                // Step 1: Q projection and reshape
                // vLLM Kimi: q = q_proj(hidden_states), then view as [n_tokens, n_head, qk_head_dim]
                // Note: Kimi MLA does NOT use RoPE (rotary_emb=None in vLLM)
                ggml_tensor * Qcur = ggml_mul_mat(ctx0, layer.wq, cur);

                // Step 2: KV compression
                // kv_cmpr_pe = kv_a_proj_with_mqa(hidden_states) -> [kv_lora_rank + qk_rope_head_dim, n_tokens]
                ggml_tensor * kv_cmpr_pe = ggml_mul_mat(ctx0, layer.wkv_a_mqa, cur);

                // Split: kv_cmpr = kv_lora[:kv_lora_rank], k_pe = kv_lora[kv_lora_rank:]
                ggml_tensor * kv_cmpr = ggml_view_2d(ctx0, kv_cmpr_pe, kv_lora_rank, n_tokens,
                        ggml_row_size(kv_cmpr_pe->type, kv_lora_rank + n_embd_head_qk_rope), 0);
                ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_cmpr_pe, n_embd_head_qk_rope, 1, n_tokens,
                        ggml_row_size(kv_cmpr_pe->type, kv_lora_rank + n_embd_head_qk_rope),
                        ggml_row_size(kv_cmpr_pe->type, kv_lora_rank + n_embd_head_qk_rope),
                        ggml_row_size(kv_cmpr_pe->type, kv_lora_rank));
                // Note: Kimi MLA does NOT apply RoPE (rotary_emb=None in vLLM);
                // k_pe is used directly without RoPE
                // Normalize the compressed KV
                kv_cmpr = build_norm(kv_cmpr, layer.attn_kv_a_norm, nullptr, LLM_NORM_RMS, il);

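                // View arithmetic (sketch): both views share the parent's row stride of
                // (kv_lora_rank + n_embd_head_qk_rope) elements, so each token's vector in
                // kv_cmpr_pe is laid out as [ kv_cmpr | k_pe ] and the k_pe view simply
                // starts at byte offset row_size(kv_lora_rank).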
                if (layer.wk_b && layer.wv_b) { // MLA KV cache enabled
                    // extract q_nope: {n_embd_head_qk_nope, n_head, n_tokens}
                    ggml_tensor * q_nope =
                        ggml_view_3d(ctx0, Qcur, n_embd_head_qk_nope, n_head, n_tokens, ggml_row_size(Qcur->type, n_embd_head_k_mla),
                                ggml_row_size(Qcur->type, n_embd_head_k_mla) * n_head, 0);
                    cb(q_nope, "q_nope", il);

                    // extract q_pe: {n_embd_head_qk_rope, n_head, n_tokens}
                    ggml_tensor * q_pe = ggml_view_3d(
                            ctx0, Qcur, n_embd_head_qk_rope, n_head, n_tokens, ggml_row_size(Qcur->type, n_embd_head_k_mla),
                            ggml_row_size(Qcur->type, n_embd_head_k_mla) * n_head, ggml_row_size(Qcur->type, n_embd_head_qk_nope));
                    cb(q_pe, "q_pe", il);

                    // {n_embd_head_qk_nope, n_tokens, n_head}
                    q_nope = ggml_permute(ctx0, q_nope, 0, 2, 1, 3);
                    cb(q_nope, "q_nope_perm", il);

                    // {n_embd_head_qk_nope, kv_lora_rank, n_head} x {n_embd_head_qk_nope, n_tokens, n_head}
                    ggml_tensor * q_nope_absorbed = ggml_mul_mat(ctx0, layer.wk_b, q_nope);
                    cb(q_nope_absorbed, "q_nope_absorbed", il);

                    // {kv_lora_rank, n_head, n_tokens}
                    q_nope_absorbed = ggml_permute(ctx0, q_nope_absorbed, 0, 2, 1, 3);
                    cb(q_nope_absorbed, "q_nope_absorbed_perm", il);

                    // {kv_lora_rank + n_embd_head_qk_rope, n_head, n_tokens}
                    // note: Kimi applies no RoPE, so the rope-first ordering required for
                    // in-place context shifting in build_rope_shift() does not apply here;
                    // q_pe follows q_nope_absorbed, matching Kcur's [kv_cmpr | k_pe] layout
                    Qcur = ggml_concat(ctx0, q_nope_absorbed, q_pe, 0);
                    cb(Qcur, "Qcur", il);

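                    // Weight-absorption trick (sketch): instead of decompressing K for every
                    // cached token, fold wk_b into the query once per step:
                    //   score = q_nope . (wk_b^T kv_cmpr) = (wk_b q_nope) . kv_cmpr
                    // so each query head is mapped into the kv_lora_rank latent space and
                    // attends directly over the compressed cache entries.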
                    kv_cmpr = ggml_reshape_3d(ctx0, kv_cmpr, kv_lora_rank, 1, n_tokens);
                    cb(kv_cmpr, "kv_cmpr_reshape", il);

                    // {kv_lora_rank + n_embd_head_qk_rope, 1, n_tokens}
                    ggml_tensor * Kcur = ggml_concat(ctx0, kv_cmpr, k_pe, 0);
                    cb(Kcur, "Kcur", il);

                    // {kv_lora_rank, 1, n_tokens}
                    ggml_tensor * Vcur = kv_cmpr;
                    cb(Vcur, "Vcur", il);

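                    // Passing layer.wv_b into build_attn (sketch of the MLA path): the
                    // latent values are expanded back to n_embd_head_v_mla per head only
                    // after the softmax, so the cache holds just kv_lora_rank + rope floats
                    // per token in a single K/V "head" -- the memory saving behind MLA.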
                    cur = build_attn(inp_attn_k, layer.wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, layer.wv_b, kq_scale_mla, il);
                    cb(cur, "mla_out", il);
                } else { // MLA KV cache disabled. Fall back to an MHA KV cache.
                    Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head_k_mla, n_head, n_tokens);
                    cb(Qcur, "mla_Q", il);
                    // KV decompression: kv = kv_b_proj(kv_c_normed)
                    ggml_tensor * kv = ggml_mul_mat(ctx0, layer.wkv_b, kv_cmpr);
                    const int64_t kv_per_head = n_embd_head_qk_nope + n_embd_head_v_mla;

                    // Split kv into k_nope and v
                    ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,
                            ggml_row_size(kv->type, kv_per_head),
                            ggml_row_size(kv->type, kv_per_head * n_head), 0);
                    ggml_tensor * Vcur = ggml_view_3d(ctx0, kv, n_embd_head_v_mla, n_head, n_tokens,
                            ggml_row_size(kv->type, kv_per_head),
                            ggml_row_size(kv->type, kv_per_head * n_head),
                            ggml_row_size(kv->type, n_embd_head_qk_nope));
                    Vcur = ggml_cont(ctx0, Vcur);
                    cb(Vcur, "mla_V", il);

                    // Concatenate k_nope + k_pe, broadcasting k_pe to all heads:
                    // K = [k_nope, k_pe], where k_nope is [qk_nope_head_dim, n_head, n_tokens]
                    // and k_pe is [qk_rope_head_dim, 1, n_tokens], repeated across heads to
                    // [qk_rope_head_dim, n_head, n_tokens] before the concat
                    ggml_tensor * k_pe_target   = ggml_new_tensor_3d(ctx0, k_pe->type, n_embd_head_qk_rope, n_head, n_tokens);
                    ggml_tensor * k_pe_repeated = ggml_repeat(ctx0, k_pe, k_pe_target);
                    ggml_tensor * Kcur = ggml_concat(ctx0, k_nope, k_pe_repeated, 0);
                    cb(Kcur, "mla_K", il);

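                    // Note on the order (an inference from the q_pe view offset above):
                    // Qcur's per-head layout is [nope | rope], so Kcur must concatenate
                    // k_nope before k_pe_repeated or the dot product would pair nope
                    // channels with rope channels.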
                    // Direct softmax attention (with the MHA KV cache)
                    // Use build_attn with inp_attn_kv for proper mask handling
                    cur = build_attn(inp_attn_kv, layer.wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale_mla, il);
                    cb(cur, "mla_out", il);
                }
            }

            // On the last layer, select only the output tokens
            if (il == n_layer - 1 && inp_out_ids) {
                cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }

            // Residual
            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
            cb(ffn_inp, "ffn_inp", il);

            // FFN Norm
            cur = build_norm(ffn_inp, layer.ffn_norm, NULL, LLM_NORM_RMS, il);
            cb(cur, "ffn_norm", il);

            if ((uint32_t) il < hparams.n_layer_dense_lead) {
                // Dense FFN layer
                cur = build_ffn(cur,
                        layer.ffn_up,   NULL, NULL,
                        layer.ffn_gate, NULL, NULL,
                        layer.ffn_down, NULL, NULL,
                        NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
                cb(cur, "ffn_out", il);
            } else {
                // MoE layer
                // Kimi uses moe_renormalize=True and routed_scaling_factor (stored as
                // expert_weights_scale) = 2.446
                ggml_tensor * moe_out = build_moe_ffn(cur,
                        layer.ffn_gate_inp,
                        layer.ffn_up_exps,
                        layer.ffn_gate_exps,
                        layer.ffn_down_exps,
                        layer.ffn_exp_probs_b,
                        hparams.n_expert,
                        hparams.n_expert_used,
                        LLM_FFN_SILU, true,
                        hparams.expert_weights_scale,
                        (llama_expert_gating_func_type) hparams.expert_gating_func,
                        il);
                cb(moe_out, "ffn_moe_out", il);

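                // Routed-expert math as configured above (sketch): the top n_expert_used
                // gate probabilities are renormalized to sum to 1 (the `true` flag), each
                // selected expert's output is weighted by its renormalized probability,
                // and the weighted sum is scaled by expert_weights_scale (~2.446).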
                // Shared expert
                {
                    ggml_tensor * ffn_shexp = build_ffn(cur,
                            layer.ffn_up_shexp,   NULL, NULL,
                            layer.ffn_gate_shexp, NULL, NULL,
                            layer.ffn_down_shexp, NULL, NULL,
                            NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
                    cb(ffn_shexp, "ffn_shexp", il);

                    cur = ggml_add(ctx0, moe_out, ffn_shexp);
                    cb(cur, "ffn_out", il);
                }
            }

            // Residual
            cur = ggml_add(ctx0, cur, ffn_inp);

            cur = build_cvec(cur, il);
            cb(cur, "l_out", il);

            inpL = cur;
        }

        cur = inpL;

        // Final Norm
        cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

        cb(cur, "result_norm", -1);
        res->t_embd = cur;

        // Output
        cur = ggml_mul_mat(ctx0, model.output, cur);
        cb(cur, "result_output", -1);
        res->t_logits = cur;

        ggml_build_forward_expand(gf, cur);
    }
|