whispercpp 1.3.3 → 1.3.4
This diff shows the changes between publicly available package versions that have been released to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the differences between package versions as published in their respective public registries.
- checksums.yaml +4 -4
- data/ext/ruby_whisper_params.c +55 -25
- data/ext/sources/CMakeLists.txt +1 -1
- data/ext/sources/bindings/javascript/package.json +1 -1
- data/ext/sources/build-xcframework.sh +24 -0
- data/ext/sources/examples/CMakeLists.txt +1 -0
- data/ext/sources/examples/addon.node/addon.cpp +19 -19
- data/ext/sources/examples/addon.node/index.js +7 -5
- data/ext/sources/examples/bench/bench.cpp +26 -16
- data/ext/sources/examples/bench.wasm/index-tmpl.html +10 -9
- data/ext/sources/examples/cli/cli.cpp +4 -2
- data/ext/sources/examples/command/command.cpp +26 -24
- data/ext/sources/examples/command.wasm/index-tmpl.html +5 -4
- data/ext/sources/examples/common-ggml.cpp +2 -0
- data/ext/sources/examples/lsp/lsp.cpp +19 -17
- data/ext/sources/examples/server/server.cpp +24 -13
- data/ext/sources/examples/server.py +6 -1
- data/ext/sources/examples/stream/stream.cpp +4 -2
- data/ext/sources/examples/stream.wasm/emscripten.cpp +6 -6
- data/ext/sources/examples/stream.wasm/index-tmpl.html +82 -5
- data/ext/sources/examples/talk-llama/CMakeLists.txt +2 -2
- data/ext/sources/examples/talk-llama/llama-adapter.cpp +101 -4
- data/ext/sources/examples/talk-llama/llama-adapter.h +6 -0
- data/ext/sources/examples/talk-llama/llama-arch.cpp +588 -15
- data/ext/sources/examples/talk-llama/llama-arch.h +58 -1
- data/ext/sources/examples/talk-llama/llama-batch.cpp +103 -71
- data/ext/sources/examples/talk-llama/llama-batch.h +31 -18
- data/ext/sources/examples/talk-llama/llama-chat.cpp +120 -5
- data/ext/sources/examples/talk-llama/llama-chat.h +7 -0
- data/ext/sources/examples/talk-llama/llama-context.cpp +460 -357
- data/ext/sources/examples/talk-llama/llama-context.h +44 -29
- data/ext/sources/examples/talk-llama/llama-cparams.h +4 -4
- data/ext/sources/examples/talk-llama/llama-graph.cpp +543 -271
- data/ext/sources/examples/talk-llama/llama-graph.h +278 -168
- data/ext/sources/examples/talk-llama/llama-hparams.cpp +118 -4
- data/ext/sources/examples/talk-llama/llama-hparams.h +61 -15
- data/ext/sources/examples/talk-llama/llama-impl.h +2 -0
- data/ext/sources/examples/talk-llama/llama-kv-cache-iswa.cpp +326 -0
- data/ext/sources/examples/talk-llama/{llama-kv-cache-unified-iswa.h → llama-kv-cache-iswa.h} +38 -29
- data/ext/sources/examples/talk-llama/llama-kv-cache.cpp +2020 -0
- data/ext/sources/examples/talk-llama/llama-kv-cache.h +358 -27
- data/ext/sources/examples/talk-llama/llama-kv-cells.h +80 -28
- data/ext/sources/examples/talk-llama/llama-memory-hybrid.cpp +56 -36
- data/ext/sources/examples/talk-llama/llama-memory-hybrid.h +30 -29
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.cpp +48 -19
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.h +13 -14
- data/ext/sources/examples/talk-llama/llama-memory.h +16 -10
- data/ext/sources/examples/talk-llama/llama-model-loader.cpp +2 -0
- data/ext/sources/examples/talk-llama/llama-model-loader.h +3 -2
- data/ext/sources/examples/talk-llama/llama-model.cpp +7165 -2336
- data/ext/sources/examples/talk-llama/llama-model.h +60 -9
- data/ext/sources/examples/talk-llama/llama-quant.cpp +48 -10
- data/ext/sources/examples/talk-llama/llama-sampling.cpp +226 -126
- data/ext/sources/examples/talk-llama/llama-vocab.cpp +440 -13
- data/ext/sources/examples/talk-llama/llama-vocab.h +45 -0
- data/ext/sources/examples/talk-llama/llama.cpp +65 -10
- data/ext/sources/examples/talk-llama/llama.h +95 -177
- data/ext/sources/examples/talk-llama/talk-llama.cpp +9 -6
- data/ext/sources/examples/talk-llama/unicode.cpp +207 -0
- data/ext/sources/examples/talk-llama/unicode.h +45 -0
- data/ext/sources/examples/wchess/wchess.cmd/wchess.cmd.cpp +4 -2
- data/ext/sources/examples/whisper.wasm/index-tmpl.html +17 -16
- data/ext/sources/ggml/CMakeLists.txt +59 -31
- data/ext/sources/ggml/cmake/ggml-config.cmake.in +132 -93
- data/ext/sources/ggml/include/ggml-backend.h +17 -1
- data/ext/sources/ggml/include/ggml-cpu.h +1 -1
- data/ext/sources/ggml/include/ggml-metal.h +1 -6
- data/ext/sources/ggml/include/ggml-opt.h +25 -6
- data/ext/sources/ggml/include/ggml-webgpu.h +19 -0
- data/ext/sources/ggml/include/ggml-zdnn.h +17 -0
- data/ext/sources/ggml/include/ggml.h +221 -16
- data/ext/sources/ggml/src/CMakeLists.txt +17 -2
- data/ext/sources/ggml/src/ggml-alloc.c +265 -141
- data/ext/sources/ggml/src/ggml-backend-impl.h +4 -1
- data/ext/sources/ggml/src/ggml-backend-reg.cpp +30 -13
- data/ext/sources/ggml/src/ggml-backend.cpp +221 -38
- data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +1 -1
- data/ext/sources/ggml/src/ggml-blas/ggml-blas.cpp +5 -4
- data/ext/sources/ggml/src/ggml-cann/CMakeLists.txt +14 -0
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.cpp +3 -1
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.cpp +903 -717
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +143 -25
- data/ext/sources/ggml/src/ggml-cann/common.h +143 -1
- data/ext/sources/ggml/src/ggml-cann/ggml-cann.cpp +488 -69
- data/ext/sources/ggml/src/ggml-common.h +17 -0
- data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +40 -18
- data/ext/sources/ggml/src/ggml-cpu/amx/amx.cpp +4 -2
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/quants.c +132 -596
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/repack.cpp +14 -286
- data/ext/sources/ggml/src/ggml-cpu/arch/loongarch/quants.c +103 -582
- data/ext/sources/ggml/src/ggml-cpu/arch/powerpc/quants.c +162 -589
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/quants.c +265 -437
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/repack.cpp +3 -58
- data/ext/sources/ggml/src/ggml-cpu/arch/s390/quants.c +521 -353
- data/ext/sources/ggml/src/ggml-cpu/arch/wasm/quants.c +54 -314
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/quants.c +184 -675
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/repack.cpp +4679 -1657
- data/ext/sources/ggml/src/ggml-cpu/arch-fallback.h +32 -2
- data/ext/sources/ggml/src/ggml-cpu/common.h +14 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-impl.h +13 -6
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +70 -42
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.cpp +35 -28
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +152 -18
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.h +7 -1
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +227 -97
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +474 -1116
- data/ext/sources/ggml/src/ggml-cpu/ops.cpp +1587 -1177
- data/ext/sources/ggml/src/ggml-cpu/ops.h +5 -8
- data/ext/sources/ggml/src/ggml-cpu/quants.c +35 -0
- data/ext/sources/ggml/src/ggml-cpu/quants.h +8 -0
- data/ext/sources/ggml/src/ggml-cpu/repack.cpp +458 -47
- data/ext/sources/ggml/src/ggml-cpu/repack.h +22 -0
- data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +89 -60
- data/ext/sources/ggml/src/ggml-cpu/spacemit/ime.cpp +1024 -0
- data/ext/sources/ggml/src/ggml-cpu/spacemit/ime.h +13 -0
- data/ext/sources/ggml/src/ggml-cpu/spacemit/ime1_kernels.cpp +3196 -0
- data/ext/sources/ggml/src/ggml-cpu/spacemit/ime_kernels.h +26 -0
- data/ext/sources/ggml/src/ggml-cpu/traits.cpp +2 -2
- data/ext/sources/ggml/src/ggml-cpu/traits.h +1 -1
- data/ext/sources/ggml/src/ggml-cpu/vec.cpp +170 -26
- data/ext/sources/ggml/src/ggml-cpu/vec.h +506 -63
- data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +20 -16
- data/ext/sources/ggml/src/ggml-cuda/add-id.cu +58 -0
- data/ext/sources/ggml/src/ggml-cuda/add-id.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +330 -191
- data/ext/sources/ggml/src/ggml-cuda/binbcast.cuh +2 -0
- data/ext/sources/ggml/src/ggml-cuda/common.cuh +250 -63
- data/ext/sources/ggml/src/ggml-cuda/conv-transpose-1d.cu +1 -4
- data/ext/sources/ggml/src/ggml-cuda/conv2d.cu +166 -0
- data/ext/sources/ggml/src/ggml-cuda/conv2d.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/convert.cu +95 -22
- data/ext/sources/ggml/src/ggml-cuda/convert.cuh +15 -0
- data/ext/sources/ggml/src/ggml-cuda/cpy-utils.cuh +217 -0
- data/ext/sources/ggml/src/ggml-cuda/cpy.cu +64 -307
- data/ext/sources/ggml/src/ggml-cuda/cross-entropy-loss.cu +2 -14
- data/ext/sources/ggml/src/ggml-cuda/dequantize.cuh +14 -40
- data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +498 -367
- data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +137 -91
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cu +755 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec.cuh +593 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +86 -50
- data/ext/sources/ggml/src/ggml-cuda/fattn.cu +185 -198
- data/ext/sources/ggml/src/ggml-cuda/fattn.cuh +2 -0
- data/ext/sources/ggml/src/ggml-cuda/getrows.cu +50 -39
- data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +379 -107
- data/ext/sources/ggml/src/ggml-cuda/im2col.cu +196 -35
- data/ext/sources/ggml/src/ggml-cuda/im2col.cuh +1 -0
- data/ext/sources/ggml/src/ggml-cuda/mean.cu +56 -2
- data/ext/sources/ggml/src/ggml-cuda/mma.cuh +198 -45
- data/ext/sources/ggml/src/ggml-cuda/mmf.cu +123 -0
- data/ext/sources/ggml/src/ggml-cuda/mmf.cuh +496 -0
- data/ext/sources/ggml/src/ggml-cuda/mmq.cu +206 -57
- data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +1262 -721
- data/ext/sources/ggml/src/ggml-cuda/{mmv.cu → mmvf.cu} +53 -53
- data/ext/sources/ggml/src/ggml-cuda/{mmv.cuh → mmvf.cuh} +3 -3
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +64 -73
- data/ext/sources/ggml/src/ggml-cuda/norm.cu +284 -12
- data/ext/sources/ggml/src/ggml-cuda/norm.cuh +7 -0
- data/ext/sources/ggml/src/ggml-cuda/opt-step-sgd.cu +49 -0
- data/ext/sources/ggml/src/ggml-cuda/opt-step-sgd.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/pad.cu +46 -23
- data/ext/sources/ggml/src/ggml-cuda/pad_reflect_1d.cu +91 -0
- data/ext/sources/ggml/src/ggml-cuda/pad_reflect_1d.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/quantize.cu +12 -10
- data/ext/sources/ggml/src/ggml-cuda/reduce_rows.cuh +53 -0
- data/ext/sources/ggml/src/ggml-cuda/roll.cu +67 -0
- data/ext/sources/ggml/src/ggml-cuda/roll.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/rope.cu +21 -27
- data/ext/sources/ggml/src/ggml-cuda/scale.cu +14 -11
- data/ext/sources/ggml/src/ggml-cuda/set-rows.cu +276 -0
- data/ext/sources/ggml/src/ggml-cuda/set-rows.cuh +7 -0
- data/ext/sources/ggml/src/ggml-cuda/softcap.cu +34 -0
- data/ext/sources/ggml/src/ggml-cuda/softcap.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/softmax.cu +126 -59
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +10 -2
- data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cu +322 -100
- data/ext/sources/ggml/src/ggml-cuda/sum.cu +6 -10
- data/ext/sources/ggml/src/ggml-cuda/sumrows.cu +21 -4
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-f16.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q4_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q4_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q5_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q5_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q8_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-f16.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q4_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q4_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q5_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q5_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q8_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-f16.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q4_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q4_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q5_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q5_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q8_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-f16.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q4_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q4_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q5_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q5_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q8_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-f16.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q4_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q4_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q5_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q5_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q8_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-f16.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q4_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q4_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q5_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q5_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q8_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +21 -18
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_10.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_11.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_12.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_13.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_14.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_15.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_2.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_3.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_4.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_5.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_6.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_7.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_8.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_9.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-mxfp4.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cu +259 -0
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cuh +14 -0
- data/ext/sources/ggml/src/ggml-cuda/tsembd.cu +3 -3
- data/ext/sources/ggml/src/ggml-cuda/unary.cu +90 -0
- data/ext/sources/ggml/src/ggml-cuda/unary.cuh +8 -0
- data/ext/sources/ggml/src/ggml-cuda/upscale.cu +92 -6
- data/ext/sources/ggml/src/ggml-cuda/vecdotq.cuh +110 -22
- data/ext/sources/ggml/src/ggml-cuda/vendors/cuda.h +4 -0
- data/ext/sources/ggml/src/ggml-cuda/vendors/hip.h +58 -36
- data/ext/sources/ggml/src/ggml-cuda/vendors/musa.h +4 -3
- data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +10 -2
- data/ext/sources/ggml/src/ggml-impl.h +119 -9
- data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +10 -7
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-common.cpp +446 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-common.h +52 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.h +33 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.m +600 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.cpp +1376 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.h +226 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.m +1308 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +136 -63
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.cpp +3158 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.h +82 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.cpp +718 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.metal +2854 -1503
- data/ext/sources/ggml/src/ggml-musa/CMakeLists.txt +18 -8
- data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +18 -0
- data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +2510 -242
- data/ext/sources/ggml/src/ggml-opencl/kernels/add.cl +107 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/add_id.cl +42 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/conv2d.cl +185 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/conv2d_f16_f32.cl +176 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +84 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/div.cl +66 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/flash_attn_f16.cl +370 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/flash_attn_f32.cl +370 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/flash_attn_f32_f16.cl +373 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gelu.cl +27 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/glu.cl +177 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/group_norm.cl +49 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f16.cl +1 -1
- data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f32.cl +1 -1
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul.cl +73 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mat_f16_f32.cl +130 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_f16_f32_l4_lm.cl +132 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_f32_f32_l4_lm.cl +133 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_mxfp4_f32.cl +189 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_mxfp4_f32_flat.cl +176 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_q8_0_f32.cl +140 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_q8_0_f32_flat.cl +222 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_mxfp4_f32.cl +144 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_mxfp4_f32_flat.cl +167 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q8_0_f32.cl +125 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q8_0_f32_flat.cl +202 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/norm.cl +80 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/rms_norm.cl +79 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/scale.cl +3 -2
- data/ext/sources/ggml/src/ggml-opencl/kernels/set_rows.cl +189 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f16.cl +34 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f32.cl +34 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f16.cl +34 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f32.cl +34 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/sub.cl +66 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +20 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/tsembd.cl +2 -2
- data/ext/sources/ggml/src/ggml-opencl/kernels/upscale.cl +2 -3
- data/ext/sources/ggml/src/ggml-opt.cpp +97 -41
- data/ext/sources/ggml/src/ggml-quants.c +111 -16
- data/ext/sources/ggml/src/ggml-quants.h +6 -0
- data/ext/sources/ggml/src/ggml-rpc/ggml-rpc.cpp +67 -47
- data/ext/sources/ggml/src/ggml-sycl/backend.hpp +2 -0
- data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +15 -5
- data/ext/sources/ggml/src/ggml-sycl/binbcast.hpp +6 -0
- data/ext/sources/ggml/src/ggml-sycl/concat.cpp +25 -16
- data/ext/sources/ggml/src/ggml-sycl/conv.cpp +10 -4
- data/ext/sources/ggml/src/ggml-sycl/convert.cpp +166 -99
- data/ext/sources/ggml/src/ggml-sycl/cpy.cpp +72 -306
- data/ext/sources/ggml/src/ggml-sycl/cpy.hpp +213 -1
- data/ext/sources/ggml/src/ggml-sycl/dmmv.cpp +67 -49
- data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +1 -31
- data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +79 -29
- data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +2 -0
- data/ext/sources/ggml/src/ggml-sycl/gemm.hpp +14 -26
- data/ext/sources/ggml/src/ggml-sycl/getrows.cpp +9 -6
- data/ext/sources/ggml/src/ggml-sycl/ggml-sycl.cpp +328 -323
- data/ext/sources/ggml/src/ggml-sycl/gla.cpp +2 -2
- data/ext/sources/ggml/src/ggml-sycl/im2col.cpp +2 -2
- data/ext/sources/ggml/src/ggml-sycl/mmq.cpp +80 -60
- data/ext/sources/ggml/src/ggml-sycl/mmvq.cpp +201 -132
- data/ext/sources/ggml/src/ggml-sycl/norm.cpp +74 -55
- data/ext/sources/ggml/src/ggml-sycl/quantize.hpp +133 -0
- data/ext/sources/ggml/src/ggml-sycl/quants.hpp +8 -9
- data/ext/sources/ggml/src/ggml-sycl/rope.cpp +35 -42
- data/ext/sources/ggml/src/ggml-sycl/set_rows.cpp +234 -0
- data/ext/sources/ggml/src/ggml-sycl/set_rows.hpp +8 -0
- data/ext/sources/ggml/src/ggml-sycl/softmax.cpp +3 -3
- data/ext/sources/ggml/src/ggml-sycl/tsembd.cpp +12 -6
- data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +2 -6
- data/ext/sources/ggml/src/ggml-sycl/wkv.cpp +16 -12
- data/ext/sources/ggml/src/ggml-vulkan/ggml-vulkan.cpp +3492 -883
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add.comp +41 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add_id.comp +42 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argmax.comp +13 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp +39 -29
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp +349 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +66 -12
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp +154 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +21 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp +2 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp +6 -5
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp +4 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_mxfp4.comp +32 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/exp.comp +21 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +69 -24
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.comp +60 -20
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +98 -42
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +64 -27
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +74 -13
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu_erf.comp +27 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu_quick.comp +11 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu_erf.comp +39 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp +4 -17
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp +19 -10
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +25 -15
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/glu_head.comp +4 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/hardsigmoid.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/hardswish.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +18 -14
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col_3d.comp +126 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp +65 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +11 -7
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vecq.comp +140 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +144 -531
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +206 -38
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_funcs.comp +556 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +12 -5
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.comp +15 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/multi_add.comp +111 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_sgd.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +24 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/quantize_q8_1.comp +53 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +55 -11
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_partials.comp +65 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/roll.comp +46 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp +1 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +7 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +7 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +7 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rte.comp +5 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +29 -7
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_back.comp +4 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sqrt.comp +17 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp +38 -5
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/swiglu_oai.comp +14 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp +4 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/types.comp +101 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +69 -5
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/utils.comp +25 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +335 -77
- data/ext/sources/ggml/src/ggml-webgpu/CMakeLists.txt +54 -0
- data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu.cpp +1558 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/add.tmpl.wgsl +44 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/add_in_place.tmpl.wgsl +41 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/binary_head.tmpl +45 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/common_decls.tmpl +930 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cpy.wgsl +60 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/embed_wgsl.py +124 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/get_rows.tmpl.wgsl +874 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/memset.wgsl +40 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul.tmpl.wgsl +44 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_in_place.tmpl.wgsl +41 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat.tmpl.wgsl +907 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm.wgsl +57 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm_in_place.wgsl +48 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/set_rows.wgsl +81 -0
- data/ext/sources/ggml/src/ggml-zdnn/CMakeLists.txt +36 -0
- data/ext/sources/ggml/src/ggml-zdnn/common.hpp +59 -0
- data/ext/sources/ggml/src/ggml-zdnn/ggml-zdnn.cpp +628 -0
- data/ext/sources/ggml/src/ggml-zdnn/mmf.cpp +80 -0
- data/ext/sources/ggml/src/ggml-zdnn/mmf.hpp +12 -0
- data/ext/sources/ggml/src/ggml-zdnn/utils.cpp +79 -0
- data/ext/sources/ggml/src/ggml-zdnn/utils.hpp +19 -0
- data/ext/sources/ggml/src/ggml.c +478 -98
- data/ext/sources/ggml/src/gguf.cpp +8 -1
- data/ext/sources/src/whisper.cpp +23 -46
- data/ext/sources/tests/CMakeLists.txt +8 -1
- data/ext/sources/tests/test-vad-full.cpp +3 -3
- data/ext/sources/tests/test-vad.cpp +2 -2
- data/lib/whisper/model/uri.rb +1 -1
- data/sig/whisper.rbs +7 -0
- data/test/test_params.rb +8 -0
- data/test/test_whisper.rb +1 -1
- data/whispercpp.gemspec +1 -1
- metadata +164 -157
- data/ext/sources/examples/talk-llama/llama-kv-cache-unified-iswa.cpp +0 -279
- data/ext/sources/examples/talk-llama/llama-kv-cache-unified.cpp +0 -1841
- data/ext/sources/examples/talk-llama/llama-kv-cache-unified.h +0 -303
- data/ext/sources/ggml/include/ggml-kompute.h +0 -50
- data/ext/sources/ggml/src/ggml-amx/CMakeLists.txt +0 -107
- data/ext/sources/ggml/src/ggml-amx/common.h +0 -94
- data/ext/sources/ggml/src/ggml-amx/ggml-amx.cpp +0 -446
- data/ext/sources/ggml/src/ggml-amx/mmq.cpp +0 -2510
- data/ext/sources/ggml/src/ggml-amx/mmq.h +0 -17
- data/ext/sources/ggml/src/ggml-cann/kernels/CMakeLists.txt +0 -30
- data/ext/sources/ggml/src/ggml-cann/kernels/ascendc_kernels.h +0 -19
- data/ext/sources/ggml/src/ggml-cann/kernels/dup.cpp +0 -234
- data/ext/sources/ggml/src/ggml-cann/kernels/get_row_f16.cpp +0 -197
- data/ext/sources/ggml/src/ggml-cann/kernels/get_row_f32.cpp +0 -190
- data/ext/sources/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp +0 -204
- data/ext/sources/ggml/src/ggml-cann/kernels/get_row_q8_0.cpp +0 -191
- data/ext/sources/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +0 -218
- data/ext/sources/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +0 -216
- data/ext/sources/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +0 -295
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cu +0 -357
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cuh +0 -3
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cu +0 -365
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cuh +0 -3
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f16.cuh +0 -482
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f32.cuh +0 -472
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
- data/ext/sources/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/common.comp +0 -112
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_add.comp +0 -58
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_addrow.comp +0 -25
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f16.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f32.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f16.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f32.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_diagmask.comp +0 -30
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_gelu.comp +0 -22
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows.comp +0 -17
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f16.comp +0 -31
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f32.comp +0 -31
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_0.comp +0 -38
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_1.comp +0 -39
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q6_k.comp +0 -44
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp +0 -69
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_mat_f32.comp +0 -51
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_0.comp +0 -33
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_1.comp +0 -35
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp +0 -140
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp +0 -106
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q8_0.comp +0 -73
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp +0 -28
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_norm.comp +0 -84
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_relu.comp +0 -21
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rmsnorm.comp +0 -53
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f16.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f32.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f16.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f32.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale.comp +0 -19
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale_8.comp +0 -23
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_silu.comp +0 -22
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp +0 -72
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp +0 -71
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.m +0 -6280
@@ -1,26 +1,27 @@
|
|
1
1
|
#include "quantize.cuh"
|
2
2
|
#include <cstdint>
|
3
3
|
|
4
|
+
__launch_bounds__(CUDA_QUANTIZE_BLOCK_SIZE, 1)
|
4
5
|
static __global__ void quantize_q8_1(
|
5
6
|
const float * __restrict__ x, void * __restrict__ vy,
|
6
7
|
const int64_t ne00, const int64_t s01, const int64_t s02, const int64_t s03,
|
7
|
-
const int64_t ne0, const
|
8
|
+
const int64_t ne0, const uint32_t ne1, const uint3 ne2) {
|
8
9
|
const int64_t i0 = (int64_t)blockDim.x*blockIdx.x + threadIdx.x;
|
9
10
|
|
10
11
|
if (i0 >= ne0) {
|
11
12
|
return;
|
12
13
|
}
|
13
14
|
|
15
|
+
const int64_t i3 = fastdiv(blockIdx.z, ne2);
|
16
|
+
const int64_t i2 = blockIdx.z - i3*ne2.z;
|
14
17
|
const int64_t i1 = blockIdx.y;
|
15
|
-
const int64_t i2 = blockIdx.z % ne2;
|
16
|
-
const int64_t i3 = blockIdx.z / ne2;
|
17
18
|
|
18
19
|
const int64_t & i00 = i0;
|
19
20
|
const int64_t & i01 = i1;
|
20
21
|
const int64_t & i02 = i2;
|
21
22
|
const int64_t & i03 = i3;
|
22
23
|
|
23
|
-
const int64_t i_cont = ((i3*ne2 + i2) * ne1 + i1) * ne0 + i0;
|
24
|
+
const int64_t i_cont = ((i3*ne2.z + i2) * ne1 + i1) * ne0 + i0;
|
24
25
|
|
25
26
|
block_q8_1 * y = (block_q8_1 *) vy;
|
26
27
|
|
@@ -31,10 +32,10 @@ static __global__ void quantize_q8_1(
|
|
31
32
|
float amax = fabsf(xi);
|
32
33
|
float sum = xi;
|
33
34
|
|
34
|
-
amax = warp_reduce_max(amax);
|
35
|
-
sum = warp_reduce_sum(sum);
|
35
|
+
amax = warp_reduce_max<QK8_1>(amax);
|
36
|
+
sum = warp_reduce_sum<QK8_1>(sum);
|
36
37
|
|
37
|
-
const float d = amax / 127;
|
38
|
+
const float d = amax / 127.0f;
|
38
39
|
const int8_t q = amax == 0.0f ? 0 : roundf(xi / d);
|
39
40
|
|
40
41
|
y[ib].qs[iqs] = q;
|
@@ -43,8 +44,7 @@ static __global__ void quantize_q8_1(
|
|
43
44
|
return;
|
44
45
|
}
|
45
46
|
|
46
|
-
|
47
|
-
reinterpret_cast<half&>(y[ib].ds.y) = sum;
|
47
|
+
y[ib].ds = make_half2(d, sum);
|
48
48
|
}
|
49
49
|
|
50
50
|
template <mmq_q8_1_ds_layout ds_layout>
|
@@ -152,10 +152,12 @@ void quantize_row_q8_1_cuda(
|
|
152
152
|
GGML_ASSERT(!ids);
|
153
153
|
GGML_ASSERT(ne0 % QK8_1 == 0);
|
154
154
|
|
155
|
+
const uint3 ne2_fastdiv = init_fastdiv_values(ne2);
|
156
|
+
|
155
157
|
const int64_t block_num_x = (ne0 + CUDA_QUANTIZE_BLOCK_SIZE - 1) / CUDA_QUANTIZE_BLOCK_SIZE;
|
156
158
|
const dim3 num_blocks(block_num_x, ne1, ne2*ne3);
|
157
159
|
const dim3 block_size(CUDA_QUANTIZE_BLOCK_SIZE, 1, 1);
|
158
|
-
quantize_q8_1<<<num_blocks, block_size, 0, stream>>>(x, vy, ne00, s01, s02, s03, ne0, ne1,
|
160
|
+
quantize_q8_1<<<num_blocks, block_size, 0, stream>>>(x, vy, ne00, s01, s02, s03, ne0, ne1, ne2_fastdiv);
|
159
161
|
GGML_UNUSED(type_src0);
|
160
162
|
}
|
161
163
|
|
@@ -0,0 +1,53 @@
|
|
1
|
+
#include "common.cuh"
|
2
|
+
|
3
|
+
// Row reduction kernel template - compute sum (norm=false) or mean (norm=true)
|
4
|
+
template <bool norm>
|
5
|
+
static __global__ void reduce_rows_f32(const float * __restrict__ x, float * __restrict__ dst, const int ncols) {
|
6
|
+
const int row = blockIdx.x;
|
7
|
+
const int col = threadIdx.x;
|
8
|
+
|
9
|
+
float sum = 0.0f;
|
10
|
+
const int num_unroll = 8;
|
11
|
+
float temp[num_unroll];
|
12
|
+
float sum_temp[num_unroll] = { 0.0f };
|
13
|
+
for (int i = col; i < ncols;) {
|
14
|
+
for (int j = 0; j < num_unroll; ++j) {
|
15
|
+
if (i < ncols) {
|
16
|
+
temp[j] = x[row * ncols + i];
|
17
|
+
} else {
|
18
|
+
temp[j] = 0;
|
19
|
+
}
|
20
|
+
i += blockDim.x;
|
21
|
+
}
|
22
|
+
for (int j = 0; j < num_unroll; ++j) {
|
23
|
+
sum_temp[j] += temp[j];
|
24
|
+
}
|
25
|
+
}
|
26
|
+
for (int j = 0; j < num_unroll; ++j) {
|
27
|
+
sum += sum_temp[j];
|
28
|
+
}
|
29
|
+
|
30
|
+
// sum up partial sums
|
31
|
+
sum = warp_reduce_sum(sum);
|
32
|
+
if (blockDim.x > WARP_SIZE) {
|
33
|
+
assert((blockDim.x <= 1024) && (blockDim.x % WARP_SIZE) == 0);
|
34
|
+
__shared__ float s_sum[32];
|
35
|
+
const int warp_id = threadIdx.x / WARP_SIZE;
|
36
|
+
const int lane_id = threadIdx.x % WARP_SIZE;
|
37
|
+
if (lane_id == 0) {
|
38
|
+
s_sum[warp_id] = sum;
|
39
|
+
}
|
40
|
+
__syncthreads();
|
41
|
+
sum = 0.0f;
|
42
|
+
if (lane_id < (static_cast<int>(blockDim.x) / WARP_SIZE)) {
|
43
|
+
sum = s_sum[lane_id];
|
44
|
+
}
|
45
|
+
sum = warp_reduce_sum(sum);
|
46
|
+
}
|
47
|
+
|
48
|
+
if (col != 0) {
|
49
|
+
return;
|
50
|
+
}
|
51
|
+
|
52
|
+
dst[row] = norm ? sum / ncols : sum;
|
53
|
+
}
|
@@ -0,0 +1,67 @@
|
|
1
|
+
#include "ggml-cuda/common.cuh"
|
2
|
+
#include "roll.cuh"
|
3
|
+
|
4
|
+
static __forceinline__ __device__ int64_t wrap_index(const int64_t idx, const int64_t ne) {
|
5
|
+
if (idx < 0) {
|
6
|
+
return idx + ne;
|
7
|
+
}
|
8
|
+
if (idx >= ne) {
|
9
|
+
return idx - ne;
|
10
|
+
}
|
11
|
+
return idx;
|
12
|
+
}
|
13
|
+
|
14
|
+
static __global__ void roll_f32_cuda(const float * __restrict__ src,
|
15
|
+
float * __restrict__ dst,
|
16
|
+
const int64_t ne00,
|
17
|
+
const int64_t ne01,
|
18
|
+
const int64_t ne02,
|
19
|
+
const int64_t ne03,
|
20
|
+
const int s0,
|
21
|
+
const int s1,
|
22
|
+
const int s2,
|
23
|
+
const int s3) {
|
24
|
+
const int64_t idx = int64_t(blockDim.x) * blockIdx.x + threadIdx.x;
|
25
|
+
const int64_t n_elements = ne00 * ne01 * ne02 * ne03;
|
26
|
+
|
27
|
+
if (idx >= n_elements) {
|
28
|
+
return;
|
29
|
+
}
|
30
|
+
|
31
|
+
const int64_t i0 = idx % ne00;
|
32
|
+
const int64_t i1 = (idx / ne00) % ne01;
|
33
|
+
const int64_t i2 = (idx / (ne00 * ne01)) % ne02;
|
34
|
+
const int64_t i3 = (idx / (ne00 * ne01 * ne02)) % ne03;
|
35
|
+
|
36
|
+
const int64_t d0 = wrap_index(i0 - s0, ne00);
|
37
|
+
const int64_t d1 = wrap_index(i1 - s1, ne01);
|
38
|
+
const int64_t d2 = wrap_index(i2 - s2, ne02);
|
39
|
+
const int64_t d3 = wrap_index(i3 - s3, ne03);
|
40
|
+
|
41
|
+
dst[i3 * (ne00 * ne01 * ne02) + i2 * (ne01 * ne00) + i1 * ne00 + i0] =
|
42
|
+
src[d3 * (ne00 * ne01 * ne02) + d2 * (ne01 * ne00) + d1 * ne00 + d0];
|
43
|
+
}
|
44
|
+
|
45
|
+
void ggml_cuda_op_roll(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
46
|
+
int s0 = dst->op_params[0];
|
47
|
+
int s1 = dst->op_params[1];
|
48
|
+
int s2 = dst->op_params[2];
|
49
|
+
int s3 = dst->op_params[3];
|
50
|
+
|
51
|
+
const ggml_tensor * src0 = dst->src[0];
|
52
|
+
const float * src0_d = (const float *) dst->src[0]->data;
|
53
|
+
float * dst_d = (float *) dst->data;
|
54
|
+
|
55
|
+
GGML_TENSOR_UNARY_OP_LOCALS;
|
56
|
+
|
57
|
+
GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
|
58
|
+
GGML_ASSERT(ggml_are_same_shape(dst->src[0], dst));
|
59
|
+
|
60
|
+
cudaStream_t stream = ctx.stream();
|
61
|
+
|
62
|
+
int64_t sz = (ne00 * ne01 * ne02 * ne03);
|
63
|
+
int64_t num_blocks = (sz + CUDA_ROLL_BLOCK_SIZE - 1) / CUDA_ROLL_BLOCK_SIZE;
|
64
|
+
|
65
|
+
roll_f32_cuda<<<num_blocks, CUDA_ROLL_BLOCK_SIZE, 0, stream>>>(
|
66
|
+
src0_d, dst_d, ne00, ne01, ne02, ne03, s0, s1, s2, s3);
|
67
|
+
}
|
@@ -50,21 +50,19 @@ static __global__ void rope_norm(
|
|
50
50
|
|
51
51
|
const int row_dst = blockDim.x*blockIdx.x + threadIdx.x;
|
52
52
|
|
53
|
-
if (i0 >= n_dims) {
|
54
|
-
const int i = row_dst*ne0 + i0;
|
55
|
-
|
56
|
-
dst[i + 0] = x[i + 0];
|
57
|
-
dst[i + 1] = x[i + 1];
|
58
|
-
|
59
|
-
return;
|
60
|
-
}
|
61
|
-
|
62
53
|
const int row_x = row_dst % ne1;
|
63
54
|
const int channel_x = row_dst / ne1;
|
64
55
|
|
65
56
|
const int idst = row_dst*ne0 + i0;
|
66
57
|
const int ix = channel_x*s2 + row_x*s1 + i0;
|
67
58
|
|
59
|
+
if (i0 >= n_dims) {
|
60
|
+
dst[idst + 0] = x[ix + 0];
|
61
|
+
dst[idst + 1] = x[ix + 1];
|
62
|
+
|
63
|
+
return;
|
64
|
+
}
|
65
|
+
|
68
66
|
const float theta_base = pos[channel_x]*powf(theta_scale, i0/2.0f);
|
69
67
|
|
70
68
|
const float freq_factor = has_ff ? freq_factors[i0/2] : 1.0f;
|
@@ -94,21 +92,19 @@ static __global__ void rope_neox(
|
|
94
92
|
|
95
93
|
const int row_dst = blockDim.x*blockIdx.x + threadIdx.x;
|
96
94
|
|
97
|
-
if (i0 >= n_dims) {
|
98
|
-
const int i = row_dst*ne0 + i0;
|
99
|
-
|
100
|
-
dst[i + 0] = x[i + 0];
|
101
|
-
dst[i + 1] = x[i + 1];
|
102
|
-
|
103
|
-
return;
|
104
|
-
}
|
105
|
-
|
106
95
|
const int row_x = row_dst % ne1;
|
107
96
|
const int channel_x = row_dst / ne1;
|
108
97
|
|
109
98
|
const int idst = row_dst*ne0 + i0/2;
|
110
99
|
const int ix = channel_x*s2 + row_x*s1 + i0/2;
|
111
100
|
|
101
|
+
if (i0 >= n_dims) {
|
102
|
+
dst[idst + i0/2 + 0] = x[ix + i0/2 + 0];
|
103
|
+
dst[idst + i0/2 + 1] = x[ix + i0/2 + 1];
|
104
|
+
|
105
|
+
return;
|
106
|
+
}
|
107
|
+
|
112
108
|
const float theta_base = pos[channel_x]*powf(theta_scale, i0/2.0f);
|
113
109
|
|
114
110
|
const float freq_factor = has_ff ? freq_factors[i0/2] : 1.0f;
|
@@ -138,21 +134,19 @@ static __global__ void rope_multi(
|
|
138
134
|
|
139
135
|
const int row_dst = blockDim.x*blockIdx.x + threadIdx.x;
|
140
136
|
|
141
|
-
if (i0 >= n_dims) {
|
142
|
-
const int i = row_dst*ne0 + i0;
|
143
|
-
|
144
|
-
dst[i + 0] = x[i + 0];
|
145
|
-
dst[i + 1] = x[i + 1];
|
146
|
-
|
147
|
-
return;
|
148
|
-
}
|
149
|
-
|
150
137
|
const int row_x = row_dst % ne1;
|
151
138
|
const int channel_x = row_dst / ne1;
|
152
139
|
|
153
140
|
const int idst = row_dst*ne0 + i0/2;
|
154
141
|
const int ix = channel_x*s2 + row_x*s1 + i0/2;
|
155
142
|
|
143
|
+
if (i0 >= n_dims) {
|
144
|
+
dst[idst + i0/2 + 0] = x[ix + i0/2 + 0];
|
145
|
+
dst[idst + i0/2 + 1] = x[ix + i0/2 + 1];
|
146
|
+
|
147
|
+
return;
|
148
|
+
}
|
149
|
+
|
156
150
|
const int sect_dims = sections.v[0] + sections.v[1] + sections.v[2] + sections.v[3];
|
157
151
|
const int sec_w = sections.v[1] + sections.v[0];
|
158
152
|
const int sector = (i0 / 2) % sect_dims;
|
@@ -1,18 +1,19 @@
|
|
1
1
|
#include "scale.cuh"
|
2
2
|
|
3
|
-
|
4
|
-
const int i = blockDim.x*blockIdx.x + threadIdx.x;
|
3
|
+
#define MAX_GRIDDIM_X 0x7FFFFFFF
|
5
4
|
|
6
|
-
|
7
|
-
|
8
|
-
|
5
|
+
static __global__ void scale_f32(const float * x, float * dst, const float scale, const float bias, const int64_t nelements) {
|
6
|
+
int64_t tid = (int64_t)blockIdx.x * (int64_t)blockDim.x + (int64_t)threadIdx.x;
|
7
|
+
int64_t stride = (int64_t)blockDim.x * (int64_t)gridDim.x;
|
9
8
|
|
10
|
-
|
9
|
+
for (int64_t i = tid; i < nelements; i += stride) {
|
10
|
+
dst[i] = scale * x[i] + bias;
|
11
|
+
}
|
11
12
|
}
|
12
13
|
|
13
|
-
static void scale_f32_cuda(const float * x, float * dst, const float scale, const
|
14
|
-
const
|
15
|
-
scale_f32<<<num_blocks, CUDA_SCALE_BLOCK_SIZE, 0, stream>>>(x, dst, scale,
|
14
|
+
static void scale_f32_cuda(const float * x, float * dst, const float scale, const float bias, const int64_t nelements, cudaStream_t stream) {
|
15
|
+
const int64_t num_blocks = (nelements + CUDA_SCALE_BLOCK_SIZE - 1) / CUDA_SCALE_BLOCK_SIZE;
|
16
|
+
scale_f32<<<MIN(MAX_GRIDDIM_X, num_blocks), CUDA_SCALE_BLOCK_SIZE, 0, stream>>>(x, dst, scale, bias, nelements);
|
16
17
|
}
|
17
18
|
|
18
19
|
void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
@@ -25,7 +26,9 @@ void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
|
25
26
|
GGML_ASSERT( dst->type == GGML_TYPE_F32);
|
26
27
|
|
27
28
|
float scale;
|
28
|
-
|
29
|
+
float bias;
|
30
|
+
memcpy(&scale, (float *) dst->op_params + 0, sizeof(float));
|
31
|
+
memcpy(&bias, (float *) dst->op_params + 1, sizeof(float));
|
29
32
|
|
30
|
-
scale_f32_cuda(src0_d, dst_d, scale, ggml_nelements(src0), stream);
|
33
|
+
scale_f32_cuda(src0_d, dst_d, scale, bias, ggml_nelements(src0), stream);
|
31
34
|
}
|
@@ -0,0 +1,276 @@
|
|
1
|
+
#include "set-rows.cuh"
|
2
|
+
#include "cpy-utils.cuh"
|
3
|
+
|
4
|
+
typedef void (*set_rows_kernel_t)(const char * src, char * dst);
|
5
|
+
|
6
|
+
// Generic quantized set_rows kernel template
|
7
|
+
template<typename idx_t, typename block_type, int qk, void (*quantize_func)(const float*, block_type*)>
|
8
|
+
static __global__ void k_set_rows_quant(
|
9
|
+
const float * __restrict__ src0, const idx_t * __restrict__ src1, block_type * __restrict__ dst,
|
10
|
+
const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03,
|
11
|
+
const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13,
|
12
|
+
const int64_t s01, const int64_t s02, const int64_t s03,
|
13
|
+
const int64_t s10, const int64_t s11, const int64_t s12,
|
14
|
+
const int64_t s1, const int64_t s2, const int64_t s3) {
|
15
|
+
|
16
|
+
const int64_t i = int64_t(blockDim.x) * blockIdx.x + threadIdx.x;
|
17
|
+
const int64_t ne_total = (ne00 * ne01 * ne02 * ne03) / qk;
|
18
|
+
|
19
|
+
if (i >= ne_total) {
|
20
|
+
return;
|
21
|
+
}
|
22
|
+
|
23
|
+
const int64_t i_base = i * qk;
|
24
|
+
const int64_t i03 = i_base / (ne00 * ne01 * ne02);
|
25
|
+
const int64_t i02 = (i_base - i03 * ne00 * ne01 * ne02) / (ne00 * ne01);
|
26
|
+
const int64_t i01 = (i_base - i03 * ne00 * ne01 * ne02 - i02 * ne00 * ne01) / ne00;
|
27
|
+
const int64_t i00 = i_base - i03 * ne00 * ne01 * ne02 - i02 * ne00 * ne01 - i01 * ne00;
|
28
|
+
|
29
|
+
const int64_t i12 = i03 % ne12;
|
30
|
+
const int64_t i11 = i02 % ne11;
|
31
|
+
const int64_t i10 = i01;
|
32
|
+
|
33
|
+
const int64_t dst_row = *(src1 + i10*s10 + i11*s11 + i12*s12);
|
34
|
+
|
35
|
+
const float * src0_row = src0 + i01*s01 + i02*s02 + i03*s03;
|
36
|
+
block_type * dst_row_ptr = dst + (dst_row*s1 + i02*s2 + i03*s3) / sizeof(block_type);
|
37
|
+
|
38
|
+
const float * src_block = src0_row + i00;
|
39
|
+
block_type * dst_block = dst_row_ptr + i00 / qk;
|
40
|
+
|
41
|
+
quantize_func(src_block, dst_block);
|
42
|
+
|
43
|
+
GGML_UNUSED(ne10);
|
44
|
+
GGML_UNUSED(ne13);
|
45
|
+
}
|
46
|
+
|
47
|
+
// Template dispatch function for quantized set_rows
|
48
|
+
template<typename idx_t, typename block_type, int qk, void (*quantize_func)(const float*, block_type*)>
|
49
|
+
static void set_rows_cuda_quant(
|
50
|
+
const float * src0_d, const idx_t * src1_d, block_type * dst_d,
|
51
|
+
const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03,
|
52
|
+
const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13,
|
53
|
+
const size_t nb01, const size_t nb02, const size_t nb03,
|
54
|
+
const size_t nb10, const size_t nb11, const size_t nb12,
|
55
|
+
const size_t nb1, const size_t nb2, const size_t nb3,
|
56
|
+
cudaStream_t stream) {
|
57
|
+
|
58
|
+
GGML_ASSERT(ne00 % qk == 0);
|
59
|
+
const int64_t ne_total = (ne00 * ne01 * ne02 * ne03) / qk;
|
60
|
+
const int num_blocks = (ne_total + CUDA_SET_ROWS_BLOCK_SIZE - 1) / CUDA_SET_ROWS_BLOCK_SIZE;
|
61
|
+
const dim3 block_size(CUDA_SET_ROWS_BLOCK_SIZE);
|
62
|
+
const dim3 grid_size(num_blocks);
|
63
|
+
|
64
|
+
const int64_t s01 = nb01/sizeof(float);
|
65
|
+
const int64_t s02 = nb02/sizeof(float);
|
66
|
+
const int64_t s03 = nb03/sizeof(float);
|
67
|
+
const int64_t s10 = nb10/sizeof(idx_t);
|
68
|
+
const int64_t s11 = nb11/sizeof(idx_t);
|
69
|
+
const int64_t s12 = nb12/sizeof(idx_t);
|
70
|
+
const int64_t s1 = nb1;
|
71
|
+
const int64_t s2 = nb2;
|
72
|
+
const int64_t s3 = nb3;
|
73
|
+
|
74
|
+
if (ne_total > 0) {
|
75
|
+
k_set_rows_quant<idx_t, block_type, qk, quantize_func><<<grid_size, block_size, 0, stream>>>(
|
76
|
+
src0_d, src1_d, dst_d,
|
77
|
+
ne00, ne01, ne02, ne03,
|
78
|
+
ne10, ne11, ne12, ne13,
|
79
|
+
s01, s02, s03,
|
80
|
+
s10, s11, s12,
|
81
|
+
s1, s2, s3);
|
82
|
+
}
|
83
|
+
}
|
84
|
+
|
85
|
+
template<typename src_t, typename idx_t, typename dst_t>
|
86
|
+
static __global__ void k_set_rows(
|
87
|
+
const src_t * __restrict__ src0, const idx_t * __restrict__ src1, dst_t * __restrict__ dst,
|
88
|
+
const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03,
|
89
|
+
const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13,
|
90
|
+
const int64_t s01, const int64_t s02, const int64_t s03,
|
91
|
+
const int64_t s10, const int64_t s11, const int64_t s12,
|
92
|
+
const int64_t s1, const int64_t s2, const int64_t s3) {
|
93
|
+
|
94
|
+
const int64_t i = int64_t(blockDim.x) * blockIdx.x + threadIdx.x;
|
95
|
+
const int64_t ne_total = ne00 * ne01 * ne02 * ne03;
|
96
|
+
|
97
|
+
if (i >= ne_total) {
|
98
|
+
return;
|
99
|
+
}
|
100
|
+
|
101
|
+
const int64_t i03 = i / (ne00 * ne01 * ne02);
|
102
|
+
const int64_t i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01);
|
103
|
+
const int64_t i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne00 * ne01) / ne00;
|
104
|
+
const int64_t i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne00 * ne01 - i01 * ne00;
|
105
|
+
|
106
|
+
const int64_t i12 = i03 % ne12;
|
107
|
+
const int64_t i11 = i02 % ne11;
|
108
|
+
const int64_t i10 = i01;
|
109
|
+
|
110
|
+
const int64_t dst_row = *(src1 + i10*s10 + i11*s11 + i12*s12);
|
111
|
+
|
112
|
+
const src_t * src0_row = src0 + i01*s01 + i02*s02 + i03*s03;
|
113
|
+
dst_t * dst_row_ptr = dst + dst_row*s1 + i02*s2 + i03*s3;
|
114
|
+
|
115
|
+
dst_row_ptr[i00] = ggml_cuda_cast<dst_t>(src0_row[i00]);
|
116
|
+
|
117
|
+
GGML_UNUSED(ne10);
|
118
|
+
GGML_UNUSED(ne13);
|
119
|
+
}
|
120
|
+
|
121
|
+
template<typename src_t, typename idx_t, typename dst_t>
|
122
|
+
static void set_rows_cuda(
|
123
|
+
const src_t * src0_d, const idx_t * src1_d, dst_t * dst_d,
|
124
|
+
const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03,
|
125
|
+
const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13,
|
126
|
+
const size_t nb01, const size_t nb02, const size_t nb03,
|
127
|
+
const size_t nb10, const size_t nb11, const size_t nb12,
|
128
|
+
const size_t nb1, const size_t nb2, const size_t nb3,
|
129
|
+
cudaStream_t stream) {
|
130
|
+
|
131
|
+
const int64_t ne_total = ne00 * ne01 * ne02 * ne03;
|
132
|
+
const int num_blocks = (ne_total + CUDA_SET_ROWS_BLOCK_SIZE - 1) / CUDA_SET_ROWS_BLOCK_SIZE;
|
133
|
+
const dim3 block_size(CUDA_SET_ROWS_BLOCK_SIZE);
|
134
|
+
const dim3 grid_size(num_blocks);
|
135
|
+
|
136
|
+
|
137
|
+
const int64_t s01 = nb01/sizeof(src_t);
|
138
|
+
const int64_t s02 = nb02/sizeof(src_t);
|
139
|
+
const int64_t s03 = nb03/sizeof(src_t);
|
140
|
+
const int64_t s10 = nb10/sizeof(idx_t);
|
141
|
+
const int64_t s11 = nb11/sizeof(idx_t);
|
142
|
+
const int64_t s12 = nb12/sizeof(idx_t);
|
143
|
+
const int64_t s1 = nb1/sizeof(dst_t);
|
144
|
+
const int64_t s2 = nb2/sizeof(dst_t);
|
145
|
+
const int64_t s3 = nb3/sizeof(dst_t);
|
146
|
+
|
147
|
+
if (ne_total > 0) {
|
148
|
+
k_set_rows<<<grid_size, block_size, 0, stream>>>(
|
149
|
+
src0_d, src1_d, dst_d,
|
150
|
+
ne00, ne01, ne02, ne03,
|
151
|
+
ne10, ne11, ne12, ne13,
|
152
|
+
s01, s02, s03,
|
153
|
+
s10, s11, s12,
|
154
|
+
s1, s2, s3);
|
155
|
+
}
|
156
|
+
}
|
157
|
+
|
158
|
+
template<typename src_t, typename idx_t>
|
159
|
+
static void set_rows_cuda(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
|
160
|
+
const src_t * src0_d = (const src_t *)src0->data;
|
161
|
+
const idx_t * src1_d = (const idx_t *)src1->data;
|
162
|
+
|
163
|
+
GGML_TENSOR_BINARY_OP_LOCALS
|
164
|
+
|
165
|
+
cudaStream_t stream = ctx.stream();
|
166
|
+
|
167
|
+
|
168
|
+
if (dst->type == GGML_TYPE_F32) {
|
169
|
+
set_rows_cuda(
|
170
|
+
src0_d, src1_d, (float*)dst->data,
|
171
|
+
ne00, ne01, ne02, ne03,
|
172
|
+
ne10, ne11, ne12, ne13,
|
173
|
+
nb01, nb02, nb03,
|
174
|
+
nb10, nb11, nb12,
|
175
|
+
nb1, nb2, nb3,
|
176
|
+
stream
|
177
|
+
);
|
178
|
+
} else if (dst->type == GGML_TYPE_F16) {
|
179
|
+
set_rows_cuda(
|
180
|
+
src0_d, src1_d, (half*)dst->data,
|
181
|
+
ne00, ne01, ne02, ne03,
|
182
|
+
ne10, ne11, ne12, ne13,
|
183
|
+
nb01, nb02, nb03,
|
184
|
+
nb10, nb11, nb12,
|
185
|
+
nb1, nb2, nb3,
|
186
|
+
stream
|
187
|
+
);
|
188
|
+
} else if (dst->type == GGML_TYPE_BF16) {
|
189
|
+
set_rows_cuda(
|
190
|
+
src0_d, src1_d, (nv_bfloat16*)dst->data,
|
191
|
+
ne00, ne01, ne02, ne03,
|
192
|
+
ne10, ne11, ne12, ne13,
|
193
|
+
nb01, nb02, nb03,
|
194
|
+
nb10, nb11, nb12,
|
195
|
+
nb1, nb2, nb3,
|
196
|
+
stream
|
197
|
+
);
|
198
|
+
} else if (dst->type == GGML_TYPE_Q4_0) {
|
199
|
+
set_rows_cuda_quant<idx_t, block_q4_0, QK4_0, quantize_f32_q4_0_block>(
|
200
|
+
src0_d, src1_d, (block_q4_0*)dst->data,
|
201
|
+
ne00, ne01, ne02, ne03,
|
202
|
+
ne10, ne11, ne12, ne13,
|
203
|
+
nb01, nb02, nb03,
|
204
|
+
nb10, nb11, nb12,
|
205
|
+
nb1, nb2, nb3,
|
206
|
+
stream
|
207
|
+
);
|
208
|
+
} else if (dst->type == GGML_TYPE_Q4_1) {
|
209
|
+
set_rows_cuda_quant<idx_t, block_q4_1, QK4_1, quantize_f32_q4_1_block>(
|
210
|
+
src0_d, src1_d, (block_q4_1*)dst->data,
|
211
|
+
ne00, ne01, ne02, ne03,
|
212
|
+
ne10, ne11, ne12, ne13,
|
213
|
+
nb01, nb02, nb03,
|
214
|
+
nb10, nb11, nb12,
|
215
|
+
nb1, nb2, nb3,
|
216
|
+
stream
|
217
|
+
);
|
218
|
+
} else if (dst->type == GGML_TYPE_Q5_0) {
|
219
|
+
set_rows_cuda_quant<idx_t, block_q5_0, QK5_0, quantize_f32_q5_0_block>(
|
220
|
+
src0_d, src1_d, (block_q5_0*)dst->data,
|
221
|
+
ne00, ne01, ne02, ne03,
|
222
|
+
ne10, ne11, ne12, ne13,
|
223
|
+
nb01, nb02, nb03,
|
224
|
+
nb10, nb11, nb12,
|
225
|
+
nb1, nb2, nb3,
|
226
|
+
stream
|
227
|
+
);
|
228
|
+
} else if (dst->type == GGML_TYPE_Q5_1) {
|
229
|
+
set_rows_cuda_quant<idx_t, block_q5_1, QK5_1, quantize_f32_q5_1_block>(
|
230
|
+
src0_d, src1_d, (block_q5_1*)dst->data,
|
231
|
+
ne00, ne01, ne02, ne03,
|
232
|
+
ne10, ne11, ne12, ne13,
|
233
|
+
nb01, nb02, nb03,
|
234
|
+
nb10, nb11, nb12,
|
235
|
+
nb1, nb2, nb3,
|
236
|
+
stream
|
237
|
+
);
|
238
|
+
} else if (dst->type == GGML_TYPE_Q8_0) {
|
239
|
+
set_rows_cuda_quant<idx_t, block_q8_0, QK8_0, quantize_f32_q8_0_block>(
|
240
|
+
src0_d, src1_d, (block_q8_0*)dst->data,
|
241
|
+
ne00, ne01, ne02, ne03,
|
242
|
+
ne10, ne11, ne12, ne13,
|
243
|
+
nb01, nb02, nb03,
|
244
|
+
nb10, nb11, nb12,
|
245
|
+
nb1, nb2, nb3,
|
246
|
+
stream
|
247
|
+
);
|
248
|
+
} else if (dst->type == GGML_TYPE_IQ4_NL) {
|
249
|
+
set_rows_cuda_quant<idx_t, block_iq4_nl, QK4_NL, quantize_f32_iq4_nl_block>(
|
250
|
+
src0_d, src1_d, (block_iq4_nl*)dst->data,
|
251
|
+
ne00, ne01, ne02, ne03,
|
252
|
+
ne10, ne11, ne12, ne13,
|
253
|
+
nb01, nb02, nb03,
|
254
|
+
nb10, nb11, nb12,
|
255
|
+
nb1, nb2, nb3,
|
256
|
+
stream
|
257
|
+
);
|
258
|
+
} else {
|
259
|
+
GGML_ABORT("unsupported type %s", ggml_type_name(dst->type));
|
260
|
+
}
|
261
|
+
}
|
262
|
+
|
263
|
+
|
264
|
+
// Entry point for GGML_OP_SET_ROWS on CUDA: scatter F32 rows from src0 into
// dst at the row positions given by src1. The index tensor may be I32 or I64;
// dispatch to the matching set_rows_cuda instantiation.
void ggml_cuda_op_set_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0]; // source rows (F32)
    const ggml_tensor * src1 = dst->src[1]; // destination row indices (I32 or I64)

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_I64 || src1->type == GGML_TYPE_I32);

    if (src1->type == GGML_TYPE_I32) {
        set_rows_cuda<float, int32_t>(ctx, src0, src1, dst);
    } else {
        // asserted above: the only remaining possibility is I64
        set_rows_cuda<float, int64_t>(ctx, src0, src1, dst);
    }
}
|
@@ -0,0 +1,34 @@
|
|
1
|
+
#include "softcap.cuh"
|
2
|
+
|
3
|
+
// Element-wise soft-cap kernel: dst[i] = softcap * tanhf(scale * x[i]).
// One thread per element over a 1D grid; threads past k (grid tail) do nothing.
static __global__ void softcap_f32(const float * x, float * dst, const float scale, const float softcap, const int k) {
    const int i = blockDim.x*blockIdx.x + threadIdx.x;

    if (i < k) {
        dst[i] = softcap * tanhf(x[i] * scale);
    }
}
|
12
|
+
|
13
|
+
// Host-side launcher for softcap_f32: ceil-divide k elements into blocks of
// CUDA_SOFTCAP_BLOCK_SIZE threads and enqueue the kernel on the given stream.
static void softcap_f32_cuda(const float * x, float * dst, const float scale, const float softcap, const int k, cudaStream_t stream) {
    const int n_blocks = (k + CUDA_SOFTCAP_BLOCK_SIZE - 1) / CUDA_SOFTCAP_BLOCK_SIZE;
    softcap_f32<<<n_blocks, CUDA_SOFTCAP_BLOCK_SIZE, 0, stream>>>(x, dst, scale, softcap, k);
}
|
17
|
+
|
18
|
+
// fused GGML_OP_SCALE + GGML_UNARY_OP_TANH + GGML_OP_SCALE
// Computes dst = softcap * tanh(scale * src0) in a single kernel launch.
// `src` is the first SCALE node of the fused chain (its input src0 and its
// scale factor are used); `dst` is the final SCALE node (its op_params hold
// the softcap factor and its buffer receives the result).
void ggml_cuda_op_softcap(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * src) {
    const ggml_tensor * src0 = src->src[0];
    cudaStream_t stream = ctx.stream();

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    // both factors are stored as the first float of the respective node's op_params
    float scale   = 0.0f;
    float softcap = 0.0f;
    memcpy(&scale,   (float *) src->op_params + 0, sizeof(float));
    memcpy(&softcap, (float *) dst->op_params + 0, sizeof(float));

    softcap_f32_cuda((const float *) src0->data, (float *) dst->data, scale, softcap, ggml_nelements(src0), stream);
}
|