whispercpp 1.3.3 → 1.3.4
This diff shows the changes between publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/ext/ruby_whisper_params.c +55 -25
- data/ext/sources/CMakeLists.txt +1 -1
- data/ext/sources/bindings/javascript/package.json +1 -1
- data/ext/sources/build-xcframework.sh +24 -0
- data/ext/sources/examples/CMakeLists.txt +1 -0
- data/ext/sources/examples/addon.node/addon.cpp +19 -19
- data/ext/sources/examples/addon.node/index.js +7 -5
- data/ext/sources/examples/bench/bench.cpp +26 -16
- data/ext/sources/examples/bench.wasm/index-tmpl.html +10 -9
- data/ext/sources/examples/cli/cli.cpp +4 -2
- data/ext/sources/examples/command/command.cpp +26 -24
- data/ext/sources/examples/command.wasm/index-tmpl.html +5 -4
- data/ext/sources/examples/common-ggml.cpp +2 -0
- data/ext/sources/examples/lsp/lsp.cpp +19 -17
- data/ext/sources/examples/server/server.cpp +24 -13
- data/ext/sources/examples/server.py +6 -1
- data/ext/sources/examples/stream/stream.cpp +4 -2
- data/ext/sources/examples/stream.wasm/emscripten.cpp +6 -6
- data/ext/sources/examples/stream.wasm/index-tmpl.html +82 -5
- data/ext/sources/examples/talk-llama/CMakeLists.txt +2 -2
- data/ext/sources/examples/talk-llama/llama-adapter.cpp +101 -4
- data/ext/sources/examples/talk-llama/llama-adapter.h +6 -0
- data/ext/sources/examples/talk-llama/llama-arch.cpp +588 -15
- data/ext/sources/examples/talk-llama/llama-arch.h +58 -1
- data/ext/sources/examples/talk-llama/llama-batch.cpp +103 -71
- data/ext/sources/examples/talk-llama/llama-batch.h +31 -18
- data/ext/sources/examples/talk-llama/llama-chat.cpp +120 -5
- data/ext/sources/examples/talk-llama/llama-chat.h +7 -0
- data/ext/sources/examples/talk-llama/llama-context.cpp +460 -357
- data/ext/sources/examples/talk-llama/llama-context.h +44 -29
- data/ext/sources/examples/talk-llama/llama-cparams.h +4 -4
- data/ext/sources/examples/talk-llama/llama-graph.cpp +543 -271
- data/ext/sources/examples/talk-llama/llama-graph.h +278 -168
- data/ext/sources/examples/talk-llama/llama-hparams.cpp +118 -4
- data/ext/sources/examples/talk-llama/llama-hparams.h +61 -15
- data/ext/sources/examples/talk-llama/llama-impl.h +2 -0
- data/ext/sources/examples/talk-llama/llama-kv-cache-iswa.cpp +326 -0
- data/ext/sources/examples/talk-llama/{llama-kv-cache-unified-iswa.h → llama-kv-cache-iswa.h} +38 -29
- data/ext/sources/examples/talk-llama/llama-kv-cache.cpp +2020 -0
- data/ext/sources/examples/talk-llama/llama-kv-cache.h +358 -27
- data/ext/sources/examples/talk-llama/llama-kv-cells.h +80 -28
- data/ext/sources/examples/talk-llama/llama-memory-hybrid.cpp +56 -36
- data/ext/sources/examples/talk-llama/llama-memory-hybrid.h +30 -29
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.cpp +48 -19
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.h +13 -14
- data/ext/sources/examples/talk-llama/llama-memory.h +16 -10
- data/ext/sources/examples/talk-llama/llama-model-loader.cpp +2 -0
- data/ext/sources/examples/talk-llama/llama-model-loader.h +3 -2
- data/ext/sources/examples/talk-llama/llama-model.cpp +7165 -2336
- data/ext/sources/examples/talk-llama/llama-model.h +60 -9
- data/ext/sources/examples/talk-llama/llama-quant.cpp +48 -10
- data/ext/sources/examples/talk-llama/llama-sampling.cpp +226 -126
- data/ext/sources/examples/talk-llama/llama-vocab.cpp +440 -13
- data/ext/sources/examples/talk-llama/llama-vocab.h +45 -0
- data/ext/sources/examples/talk-llama/llama.cpp +65 -10
- data/ext/sources/examples/talk-llama/llama.h +95 -177
- data/ext/sources/examples/talk-llama/talk-llama.cpp +9 -6
- data/ext/sources/examples/talk-llama/unicode.cpp +207 -0
- data/ext/sources/examples/talk-llama/unicode.h +45 -0
- data/ext/sources/examples/wchess/wchess.cmd/wchess.cmd.cpp +4 -2
- data/ext/sources/examples/whisper.wasm/index-tmpl.html +17 -16
- data/ext/sources/ggml/CMakeLists.txt +59 -31
- data/ext/sources/ggml/cmake/ggml-config.cmake.in +132 -93
- data/ext/sources/ggml/include/ggml-backend.h +17 -1
- data/ext/sources/ggml/include/ggml-cpu.h +1 -1
- data/ext/sources/ggml/include/ggml-metal.h +1 -6
- data/ext/sources/ggml/include/ggml-opt.h +25 -6
- data/ext/sources/ggml/include/ggml-webgpu.h +19 -0
- data/ext/sources/ggml/include/ggml-zdnn.h +17 -0
- data/ext/sources/ggml/include/ggml.h +221 -16
- data/ext/sources/ggml/src/CMakeLists.txt +17 -2
- data/ext/sources/ggml/src/ggml-alloc.c +265 -141
- data/ext/sources/ggml/src/ggml-backend-impl.h +4 -1
- data/ext/sources/ggml/src/ggml-backend-reg.cpp +30 -13
- data/ext/sources/ggml/src/ggml-backend.cpp +221 -38
- data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +1 -1
- data/ext/sources/ggml/src/ggml-blas/ggml-blas.cpp +5 -4
- data/ext/sources/ggml/src/ggml-cann/CMakeLists.txt +14 -0
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.cpp +3 -1
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.cpp +903 -717
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +143 -25
- data/ext/sources/ggml/src/ggml-cann/common.h +143 -1
- data/ext/sources/ggml/src/ggml-cann/ggml-cann.cpp +488 -69
- data/ext/sources/ggml/src/ggml-common.h +17 -0
- data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +40 -18
- data/ext/sources/ggml/src/ggml-cpu/amx/amx.cpp +4 -2
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/quants.c +132 -596
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/repack.cpp +14 -286
- data/ext/sources/ggml/src/ggml-cpu/arch/loongarch/quants.c +103 -582
- data/ext/sources/ggml/src/ggml-cpu/arch/powerpc/quants.c +162 -589
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/quants.c +265 -437
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/repack.cpp +3 -58
- data/ext/sources/ggml/src/ggml-cpu/arch/s390/quants.c +521 -353
- data/ext/sources/ggml/src/ggml-cpu/arch/wasm/quants.c +54 -314
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/quants.c +184 -675
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/repack.cpp +4679 -1657
- data/ext/sources/ggml/src/ggml-cpu/arch-fallback.h +32 -2
- data/ext/sources/ggml/src/ggml-cpu/common.h +14 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-impl.h +13 -6
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +70 -42
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.cpp +35 -28
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +152 -18
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.h +7 -1
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +227 -97
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +474 -1116
- data/ext/sources/ggml/src/ggml-cpu/ops.cpp +1587 -1177
- data/ext/sources/ggml/src/ggml-cpu/ops.h +5 -8
- data/ext/sources/ggml/src/ggml-cpu/quants.c +35 -0
- data/ext/sources/ggml/src/ggml-cpu/quants.h +8 -0
- data/ext/sources/ggml/src/ggml-cpu/repack.cpp +458 -47
- data/ext/sources/ggml/src/ggml-cpu/repack.h +22 -0
- data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +89 -60
- data/ext/sources/ggml/src/ggml-cpu/spacemit/ime.cpp +1024 -0
- data/ext/sources/ggml/src/ggml-cpu/spacemit/ime.h +13 -0
- data/ext/sources/ggml/src/ggml-cpu/spacemit/ime1_kernels.cpp +3196 -0
- data/ext/sources/ggml/src/ggml-cpu/spacemit/ime_kernels.h +26 -0
- data/ext/sources/ggml/src/ggml-cpu/traits.cpp +2 -2
- data/ext/sources/ggml/src/ggml-cpu/traits.h +1 -1
- data/ext/sources/ggml/src/ggml-cpu/vec.cpp +170 -26
- data/ext/sources/ggml/src/ggml-cpu/vec.h +506 -63
- data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +20 -16
- data/ext/sources/ggml/src/ggml-cuda/add-id.cu +58 -0
- data/ext/sources/ggml/src/ggml-cuda/add-id.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +330 -191
- data/ext/sources/ggml/src/ggml-cuda/binbcast.cuh +2 -0
- data/ext/sources/ggml/src/ggml-cuda/common.cuh +250 -63
- data/ext/sources/ggml/src/ggml-cuda/conv-transpose-1d.cu +1 -4
- data/ext/sources/ggml/src/ggml-cuda/conv2d.cu +166 -0
- data/ext/sources/ggml/src/ggml-cuda/conv2d.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/convert.cu +95 -22
- data/ext/sources/ggml/src/ggml-cuda/convert.cuh +15 -0
- data/ext/sources/ggml/src/ggml-cuda/cpy-utils.cuh +217 -0
- data/ext/sources/ggml/src/ggml-cuda/cpy.cu +64 -307
- data/ext/sources/ggml/src/ggml-cuda/cross-entropy-loss.cu +2 -14
- data/ext/sources/ggml/src/ggml-cuda/dequantize.cuh +14 -40
- data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +498 -367
- data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +137 -91
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cu +755 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec.cuh +593 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +86 -50
- data/ext/sources/ggml/src/ggml-cuda/fattn.cu +185 -198
- data/ext/sources/ggml/src/ggml-cuda/fattn.cuh +2 -0
- data/ext/sources/ggml/src/ggml-cuda/getrows.cu +50 -39
- data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +379 -107
- data/ext/sources/ggml/src/ggml-cuda/im2col.cu +196 -35
- data/ext/sources/ggml/src/ggml-cuda/im2col.cuh +1 -0
- data/ext/sources/ggml/src/ggml-cuda/mean.cu +56 -2
- data/ext/sources/ggml/src/ggml-cuda/mma.cuh +198 -45
- data/ext/sources/ggml/src/ggml-cuda/mmf.cu +123 -0
- data/ext/sources/ggml/src/ggml-cuda/mmf.cuh +496 -0
- data/ext/sources/ggml/src/ggml-cuda/mmq.cu +206 -57
- data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +1262 -721
- data/ext/sources/ggml/src/ggml-cuda/{mmv.cu → mmvf.cu} +53 -53
- data/ext/sources/ggml/src/ggml-cuda/{mmv.cuh → mmvf.cuh} +3 -3
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +64 -73
- data/ext/sources/ggml/src/ggml-cuda/norm.cu +284 -12
- data/ext/sources/ggml/src/ggml-cuda/norm.cuh +7 -0
- data/ext/sources/ggml/src/ggml-cuda/opt-step-sgd.cu +49 -0
- data/ext/sources/ggml/src/ggml-cuda/opt-step-sgd.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/pad.cu +46 -23
- data/ext/sources/ggml/src/ggml-cuda/pad_reflect_1d.cu +91 -0
- data/ext/sources/ggml/src/ggml-cuda/pad_reflect_1d.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/quantize.cu +12 -10
- data/ext/sources/ggml/src/ggml-cuda/reduce_rows.cuh +53 -0
- data/ext/sources/ggml/src/ggml-cuda/roll.cu +67 -0
- data/ext/sources/ggml/src/ggml-cuda/roll.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/rope.cu +21 -27
- data/ext/sources/ggml/src/ggml-cuda/scale.cu +14 -11
- data/ext/sources/ggml/src/ggml-cuda/set-rows.cu +276 -0
- data/ext/sources/ggml/src/ggml-cuda/set-rows.cuh +7 -0
- data/ext/sources/ggml/src/ggml-cuda/softcap.cu +34 -0
- data/ext/sources/ggml/src/ggml-cuda/softcap.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/softmax.cu +126 -59
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +10 -2
- data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cu +322 -100
- data/ext/sources/ggml/src/ggml-cuda/sum.cu +6 -10
- data/ext/sources/ggml/src/ggml-cuda/sumrows.cu +21 -4
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-f16.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q4_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q4_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q5_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q5_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-f16-q8_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-f16.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q4_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q4_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q5_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q5_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_0-q8_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-f16.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q4_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q4_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q5_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q5_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q4_1-q8_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-f16.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q4_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q4_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q5_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q5_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_0-q8_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-f16.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q4_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q4_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q5_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q5_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q5_1-q8_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-f16.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q4_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q4_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q5_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q5_1.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-instance-q8_0-q8_0.cu +7 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +21 -18
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_10.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_11.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_12.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_13.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_14.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_15.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_2.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_3.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_4.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_5.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_6.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_7.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_8.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmf-instance-ncols_9.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-mxfp4.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cu +259 -0
- data/ext/sources/ggml/src/ggml-cuda/topk-moe.cuh +14 -0
- data/ext/sources/ggml/src/ggml-cuda/tsembd.cu +3 -3
- data/ext/sources/ggml/src/ggml-cuda/unary.cu +90 -0
- data/ext/sources/ggml/src/ggml-cuda/unary.cuh +8 -0
- data/ext/sources/ggml/src/ggml-cuda/upscale.cu +92 -6
- data/ext/sources/ggml/src/ggml-cuda/vecdotq.cuh +110 -22
- data/ext/sources/ggml/src/ggml-cuda/vendors/cuda.h +4 -0
- data/ext/sources/ggml/src/ggml-cuda/vendors/hip.h +58 -36
- data/ext/sources/ggml/src/ggml-cuda/vendors/musa.h +4 -3
- data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +10 -2
- data/ext/sources/ggml/src/ggml-impl.h +119 -9
- data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +10 -7
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-common.cpp +446 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-common.h +52 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.h +33 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-context.m +600 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.cpp +1376 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.h +226 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-device.m +1308 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +136 -63
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.cpp +3158 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-ops.h +82 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.cpp +718 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.metal +2854 -1503
- data/ext/sources/ggml/src/ggml-musa/CMakeLists.txt +18 -8
- data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +18 -0
- data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +2510 -242
- data/ext/sources/ggml/src/ggml-opencl/kernels/add.cl +107 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/add_id.cl +42 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/conv2d.cl +185 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/conv2d_f16_f32.cl +176 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +84 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/div.cl +66 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/flash_attn_f16.cl +370 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/flash_attn_f32.cl +370 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/flash_attn_f32_f16.cl +373 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gelu.cl +27 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/glu.cl +177 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/group_norm.cl +49 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f16.cl +1 -1
- data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f32.cl +1 -1
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul.cl +73 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mat_f16_f32.cl +130 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_f16_f32_l4_lm.cl +132 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mm_f32_f32_l4_lm.cl +133 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_mxfp4_f32.cl +189 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_mxfp4_f32_flat.cl +176 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_q8_0_f32.cl +140 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_q8_0_f32_flat.cl +222 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_mxfp4_f32.cl +144 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_mxfp4_f32_flat.cl +167 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q8_0_f32.cl +125 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q8_0_f32_flat.cl +202 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/norm.cl +80 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/rms_norm.cl +79 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/scale.cl +3 -2
- data/ext/sources/ggml/src/ggml-opencl/kernels/set_rows.cl +189 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f16.cl +34 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f32.cl +34 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f16.cl +34 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f32.cl +34 -13
- data/ext/sources/ggml/src/ggml-opencl/kernels/sub.cl +66 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +20 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/tsembd.cl +2 -2
- data/ext/sources/ggml/src/ggml-opencl/kernels/upscale.cl +2 -3
- data/ext/sources/ggml/src/ggml-opt.cpp +97 -41
- data/ext/sources/ggml/src/ggml-quants.c +111 -16
- data/ext/sources/ggml/src/ggml-quants.h +6 -0
- data/ext/sources/ggml/src/ggml-rpc/ggml-rpc.cpp +67 -47
- data/ext/sources/ggml/src/ggml-sycl/backend.hpp +2 -0
- data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +15 -5
- data/ext/sources/ggml/src/ggml-sycl/binbcast.hpp +6 -0
- data/ext/sources/ggml/src/ggml-sycl/concat.cpp +25 -16
- data/ext/sources/ggml/src/ggml-sycl/conv.cpp +10 -4
- data/ext/sources/ggml/src/ggml-sycl/convert.cpp +166 -99
- data/ext/sources/ggml/src/ggml-sycl/cpy.cpp +72 -306
- data/ext/sources/ggml/src/ggml-sycl/cpy.hpp +213 -1
- data/ext/sources/ggml/src/ggml-sycl/dmmv.cpp +67 -49
- data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +1 -31
- data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +79 -29
- data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +2 -0
- data/ext/sources/ggml/src/ggml-sycl/gemm.hpp +14 -26
- data/ext/sources/ggml/src/ggml-sycl/getrows.cpp +9 -6
- data/ext/sources/ggml/src/ggml-sycl/ggml-sycl.cpp +328 -323
- data/ext/sources/ggml/src/ggml-sycl/gla.cpp +2 -2
- data/ext/sources/ggml/src/ggml-sycl/im2col.cpp +2 -2
- data/ext/sources/ggml/src/ggml-sycl/mmq.cpp +80 -60
- data/ext/sources/ggml/src/ggml-sycl/mmvq.cpp +201 -132
- data/ext/sources/ggml/src/ggml-sycl/norm.cpp +74 -55
- data/ext/sources/ggml/src/ggml-sycl/quantize.hpp +133 -0
- data/ext/sources/ggml/src/ggml-sycl/quants.hpp +8 -9
- data/ext/sources/ggml/src/ggml-sycl/rope.cpp +35 -42
- data/ext/sources/ggml/src/ggml-sycl/set_rows.cpp +234 -0
- data/ext/sources/ggml/src/ggml-sycl/set_rows.hpp +8 -0
- data/ext/sources/ggml/src/ggml-sycl/softmax.cpp +3 -3
- data/ext/sources/ggml/src/ggml-sycl/tsembd.cpp +12 -6
- data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +2 -6
- data/ext/sources/ggml/src/ggml-sycl/wkv.cpp +16 -12
- data/ext/sources/ggml/src/ggml-vulkan/ggml-vulkan.cpp +3492 -883
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add.comp +41 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add_id.comp +42 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argmax.comp +13 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp +39 -29
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp +349 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +2 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +66 -12
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp +154 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +21 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp +2 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp +6 -5
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp +4 -2
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_mxfp4.comp +32 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/exp.comp +21 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +69 -24
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.comp +60 -20
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +98 -42
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +64 -27
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +74 -13
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu_erf.comp +27 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu_quick.comp +11 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu_erf.comp +39 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp +4 -17
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp +19 -10
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +25 -15
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/glu_head.comp +4 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/hardsigmoid.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/hardswish.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +18 -14
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col_3d.comp +126 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp +65 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +11 -7
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vecq.comp +140 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +144 -531
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +206 -38
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_funcs.comp +556 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +12 -5
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.comp +15 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/multi_add.comp +111 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_sgd.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +24 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/quantize_q8_1.comp +53 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +55 -11
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_partials.comp +65 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/roll.comp +46 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp +1 -4
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +7 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +7 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +7 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rte.comp +5 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +1 -1
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +29 -7
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_back.comp +4 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sqrt.comp +17 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp +38 -5
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/swiglu_oai.comp +14 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp +4 -3
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/types.comp +101 -9
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +69 -5
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/utils.comp +25 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +335 -77
- data/ext/sources/ggml/src/ggml-webgpu/CMakeLists.txt +54 -0
- data/ext/sources/ggml/src/ggml-webgpu/ggml-webgpu.cpp +1558 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/add.tmpl.wgsl +44 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/add_in_place.tmpl.wgsl +41 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/binary_head.tmpl +45 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/common_decls.tmpl +930 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/cpy.wgsl +60 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/embed_wgsl.py +124 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/get_rows.tmpl.wgsl +874 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/memset.wgsl +40 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul.tmpl.wgsl +44 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_in_place.tmpl.wgsl +41 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/mul_mat.tmpl.wgsl +907 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm.wgsl +57 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm_in_place.wgsl +48 -0
- data/ext/sources/ggml/src/ggml-webgpu/wgsl-shaders/set_rows.wgsl +81 -0
- data/ext/sources/ggml/src/ggml-zdnn/CMakeLists.txt +36 -0
- data/ext/sources/ggml/src/ggml-zdnn/common.hpp +59 -0
- data/ext/sources/ggml/src/ggml-zdnn/ggml-zdnn.cpp +628 -0
- data/ext/sources/ggml/src/ggml-zdnn/mmf.cpp +80 -0
- data/ext/sources/ggml/src/ggml-zdnn/mmf.hpp +12 -0
- data/ext/sources/ggml/src/ggml-zdnn/utils.cpp +79 -0
- data/ext/sources/ggml/src/ggml-zdnn/utils.hpp +19 -0
- data/ext/sources/ggml/src/ggml.c +478 -98
- data/ext/sources/ggml/src/gguf.cpp +8 -1
- data/ext/sources/src/whisper.cpp +23 -46
- data/ext/sources/tests/CMakeLists.txt +8 -1
- data/ext/sources/tests/test-vad-full.cpp +3 -3
- data/ext/sources/tests/test-vad.cpp +2 -2
- data/lib/whisper/model/uri.rb +1 -1
- data/sig/whisper.rbs +7 -0
- data/test/test_params.rb +8 -0
- data/test/test_whisper.rb +1 -1
- data/whispercpp.gemspec +1 -1
- metadata +164 -157
- data/ext/sources/examples/talk-llama/llama-kv-cache-unified-iswa.cpp +0 -279
- data/ext/sources/examples/talk-llama/llama-kv-cache-unified.cpp +0 -1841
- data/ext/sources/examples/talk-llama/llama-kv-cache-unified.h +0 -303
- data/ext/sources/ggml/include/ggml-kompute.h +0 -50
- data/ext/sources/ggml/src/ggml-amx/CMakeLists.txt +0 -107
- data/ext/sources/ggml/src/ggml-amx/common.h +0 -94
- data/ext/sources/ggml/src/ggml-amx/ggml-amx.cpp +0 -446
- data/ext/sources/ggml/src/ggml-amx/mmq.cpp +0 -2510
- data/ext/sources/ggml/src/ggml-amx/mmq.h +0 -17
- data/ext/sources/ggml/src/ggml-cann/kernels/CMakeLists.txt +0 -30
- data/ext/sources/ggml/src/ggml-cann/kernels/ascendc_kernels.h +0 -19
- data/ext/sources/ggml/src/ggml-cann/kernels/dup.cpp +0 -234
- data/ext/sources/ggml/src/ggml-cann/kernels/get_row_f16.cpp +0 -197
- data/ext/sources/ggml/src/ggml-cann/kernels/get_row_f32.cpp +0 -190
- data/ext/sources/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp +0 -204
- data/ext/sources/ggml/src/ggml-cann/kernels/get_row_q8_0.cpp +0 -191
- data/ext/sources/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +0 -218
- data/ext/sources/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +0 -216
- data/ext/sources/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +0 -295
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cu +0 -357
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cuh +0 -3
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cu +0 -365
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cuh +0 -3
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f16.cuh +0 -482
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f32.cuh +0 -472
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu +0 -5
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu +0 -5
- data/ext/sources/ggml/src/ggml-kompute/CMakeLists.txt +0 -166
- data/ext/sources/ggml/src/ggml-kompute/ggml-kompute.cpp +0 -2251
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/common.comp +0 -112
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_add.comp +0 -58
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_addrow.comp +0 -25
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f16.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f32.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f16.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f32.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_diagmask.comp +0 -30
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_gelu.comp +0 -22
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows.comp +0 -17
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f16.comp +0 -31
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f32.comp +0 -31
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_0.comp +0 -38
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_1.comp +0 -39
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q6_k.comp +0 -44
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp +0 -69
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_mat_f32.comp +0 -51
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_0.comp +0 -33
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_1.comp +0 -35
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp +0 -140
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp +0 -106
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q8_0.comp +0 -73
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp +0 -28
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_norm.comp +0 -84
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_relu.comp +0 -21
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rmsnorm.comp +0 -53
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f16.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f32.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f16.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f32.comp +0 -52
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale.comp +0 -19
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale_8.comp +0 -23
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_silu.comp +0 -22
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp +0 -72
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp +0 -71
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.m +0 -6280
@@ -0,0 +1,1024 @@
|
|
1
|
+
#define GGML_COMMON_IMPL_CPP
|
2
|
+
#define GGML_COMMON_DECL_CPP
|
3
|
+
|
4
|
+
#include "ime.h"
|
5
|
+
|
6
|
+
#include "ggml-backend-impl.h"
|
7
|
+
#include "ggml-common.h"
|
8
|
+
#include "ggml-cpu.h"
|
9
|
+
#include "ime_kernels.h"
|
10
|
+
#include "traits.h"
|
11
|
+
|
12
|
+
#include <algorithm>
|
13
|
+
#include <cassert>
|
14
|
+
#include <cmath>
|
15
|
+
#include <cstdio> // for GGML_ASSERT
|
16
|
+
#include <stdexcept>
|
17
|
+
#include <thread>
|
18
|
+
|
19
|
+
// clang-format off
|
20
|
+
#if defined(__riscv)
|
21
|
+
|
22
|
+
#if !defined(__riscv_v) || !defined(__riscv_v_intrinsic)
|
23
|
+
#error "riscv v extension or v_intrinsic not enabled"
|
24
|
+
#else
|
25
|
+
#include <riscv_vector.h>
|
26
|
+
#endif
|
27
|
+
|
28
|
+
#if !defined(__riscv_zfh)
|
29
|
+
#error "riscv zfh extension not enabled"
|
30
|
+
#endif
|
31
|
+
|
32
|
+
#if defined(RISCV64_SPACEMIT_IME1)
|
33
|
+
#else
|
34
|
+
#error "RISCV64_SPACEMIT_IME1 not defined"
|
35
|
+
#endif
|
36
|
+
|
37
|
+
#else
|
38
|
+
|
39
|
+
#error "riscv not enabled in this build"
|
40
|
+
|
41
|
+
#endif
|
42
|
+
|
43
|
+
#if defined(__GNUC__)
|
44
|
+
#pragma GCC diagnostic ignored "-Woverlength-strings"
|
45
|
+
#pragma GCC diagnostic ignored "-Wcast-qual"
|
46
|
+
#pragma GCC diagnostic ignored "-Wunused-parameter"
|
47
|
+
#endif
|
48
|
+
|
49
|
+
#if defined(RISCV64_SPACEMIT_IME1)
|
50
|
+
#define QGEMM_STRIDEN_THREAD_ALIGN 16
|
51
|
+
#else
|
52
|
+
#define QGEMM_STRIDEN_THREAD_ALIGN 32
|
53
|
+
#endif
|
54
|
+
|
55
|
+
// clang-format on
|
56
|
+
|
57
|
+
struct qnbitgemm_spacemit_ime_args {
|
58
|
+
const float * a_ptr = nullptr;
|
59
|
+
size_t lda = 0;
|
60
|
+
const std::byte * packed_quant_b_data = nullptr;
|
61
|
+
const float * quant_b_scale = nullptr;
|
62
|
+
const void * quant_b_zp = nullptr;
|
63
|
+
const float * quant_b_blksum = nullptr;
|
64
|
+
const float * bias = nullptr;
|
65
|
+
float * c_ptr = nullptr;
|
66
|
+
size_t ldc = 0;
|
67
|
+
};
|
68
|
+
|
69
|
+
constexpr size_t div_round_up(size_t up, size_t down) {
|
70
|
+
return (up + down - 1) / down;
|
71
|
+
}
|
72
|
+
|
73
|
+
constexpr size_t q8_blk_size(size_t blk_len) {
|
74
|
+
const size_t blk_size = sizeof(float) + blk_len * sizeof(int8_t);
|
75
|
+
// Currently, the strictest alignment requirement of a block is for a float.
|
76
|
+
// Ensure contiguous blocks are suitably aligned.
|
77
|
+
assert(blk_size % alignof(float) == 0);
|
78
|
+
return blk_size;
|
79
|
+
}
|
80
|
+
|
81
|
+
namespace ggml::cpu::riscv64_spacemit {
|
82
|
+
|
83
|
+
const int num_ai_cores = std::thread::hardware_concurrency() / 2;
|
84
|
+
|
85
|
+
} // namespace ggml::cpu::riscv64_spacemit
|
86
|
+
|
87
|
+
static void sqnbitgemm_spacemit_ime_i8i4(const size_t blk_len,
|
88
|
+
const size_t gemm_k,
|
89
|
+
const qnbitgemm_spacemit_ime_args * gemm_args,
|
90
|
+
void * const per_gemm_ws,
|
91
|
+
const size_t m_start,
|
92
|
+
const size_t m_count,
|
93
|
+
const size_t n_start,
|
94
|
+
const size_t n_count) {
|
95
|
+
constexpr size_t scale_stride = sizeof(uint16_t);
|
96
|
+
constexpr size_t blk_bitwidth = 4;
|
97
|
+
|
98
|
+
const size_t k_blks = div_round_up(gemm_k, blk_len);
|
99
|
+
|
100
|
+
const size_t lda = k_blks * q8_blk_size(blk_len);
|
101
|
+
const size_t ldc = gemm_args->ldc;
|
102
|
+
const size_t ldb = k_blks * (blk_len * blk_bitwidth / 8);
|
103
|
+
const std::byte * quant_a_ptr = static_cast<const std::byte *>(per_gemm_ws) + m_start * lda;
|
104
|
+
|
105
|
+
const size_t zero_point_stride = gemm_args->quant_b_zp != nullptr ? sizeof(uint8_t) : 0;
|
106
|
+
const size_t packed_b_stride = ldb + k_blks * (scale_stride + zero_point_stride);
|
107
|
+
const std::byte * packed_quant_b_data = gemm_args->packed_quant_b_data + n_start * packed_b_stride;
|
108
|
+
|
109
|
+
float * c_ptr = gemm_args->c_ptr + m_start * ldc + n_start;
|
110
|
+
|
111
|
+
size_t count_n = 0;
|
112
|
+
const size_t compute_block_count_n = m_count == 1 ? n_count : 16;
|
113
|
+
for (size_t n = 0; n < n_count; n += count_n) {
|
114
|
+
count_n = std::min(n_count - n, compute_block_count_n);
|
115
|
+
|
116
|
+
const std::byte * a_row = quant_a_ptr;
|
117
|
+
const std::byte * b_col = packed_quant_b_data + n * packed_b_stride;
|
118
|
+
const std::byte * b_col_zp = (zero_point_stride != 0) ? b_col : nullptr;
|
119
|
+
float * c_blk = c_ptr + n;
|
120
|
+
|
121
|
+
int32_t rows_remaining = m_count;
|
122
|
+
|
123
|
+
while (rows_remaining > 0) {
|
124
|
+
const auto rows_handled = sqnbitgemm_spacemit_ime::ime1::gemm_kernel_i8i4(
|
125
|
+
blk_len, a_row, b_col, nullptr, b_col_zp, c_blk, rows_remaining, count_n, gemm_k, k_blks, ldc, nullptr,
|
126
|
+
scale_stride);
|
127
|
+
|
128
|
+
c_blk += rows_handled * ldc;
|
129
|
+
a_row += rows_handled * lda;
|
130
|
+
|
131
|
+
rows_remaining -= rows_handled;
|
132
|
+
}
|
133
|
+
}
|
134
|
+
}
|
135
|
+
|
136
|
+
template <int K> constexpr int QK_0() {
|
137
|
+
if constexpr (K == 4) {
|
138
|
+
return QK4_0;
|
139
|
+
}
|
140
|
+
if constexpr (K == 8) {
|
141
|
+
return QK8_0;
|
142
|
+
}
|
143
|
+
return -1;
|
144
|
+
}
|
145
|
+
|
146
|
+
template <int K, int N> struct block {
|
147
|
+
ggml_half d[N]; // deltas for N qK_0 blocks
|
148
|
+
uint8_t qs[(QK_0<K>() * N * K) / 8]; // quants for N qK_0 blocks
|
149
|
+
};
|
150
|
+
|
151
|
+
template <int K, int N> struct block_with_zp {
|
152
|
+
ggml_half d[N]; // deltas for N qK_1 blocks
|
153
|
+
uint8_t zp[N]; // zero points for N qK_1 blocks
|
154
|
+
uint8_t qs[(QK_0<K>() * N * K) / 8]; // quants for N qK_1 blocks
|
155
|
+
};
|
156
|
+
|
157
|
+
// control size
|
158
|
+
static_assert(sizeof(block<4, 16>) == 16 * sizeof(ggml_half) + QK4_0 * 8, "wrong block<4,16> size/padding");
|
159
|
+
static_assert(sizeof(block_with_zp<4, 16>) == 16 * sizeof(ggml_half) + QK4_0 * 8 + 16 * sizeof(uint8_t),
|
160
|
+
"wrong block_with_zp<4,16> size/padding");
|
161
|
+
static_assert(sizeof(block<8, 16>) == 16 * sizeof(ggml_half) + QK4_0 * 16, "wrong block<8,16> size/padding");
|
162
|
+
|
163
|
+
using block_q4_0x16 = block<4, 16>;
|
164
|
+
using block_q4_1x16 = block_with_zp<4, 16>;
|
165
|
+
using block_q8_0x16 = block<8, 16>;
|
166
|
+
|
167
|
+
static block_q4_0x16 make_block_q4_0x16(block_q4_0 * in, unsigned int blck_size_interleave) {
|
168
|
+
block_q4_0x16 out;
|
169
|
+
GGML_ASSERT(QK4_0 / blck_size_interleave == 2);
|
170
|
+
|
171
|
+
for (int i = 0; i < 16; i++) {
|
172
|
+
out.d[i] = in[i].d;
|
173
|
+
}
|
174
|
+
|
175
|
+
for (int i = 0; i < 16; i++) {
|
176
|
+
// [0, 15], in.d & 0x0F
|
177
|
+
for (int j = 0; j < QK4_0 / 4; j++) {
|
178
|
+
//src [b0 b16] ......... [b8 b24] ......... [b15 b31]
|
179
|
+
//dst [b0 b8] ......... [b7 b15]
|
180
|
+
out.qs[i * QK4_0 / 4 + j] = (in[i].qs[j] & 0x0F) | ((in[i].qs[j + QK4_0 / 4] & 0x0F) << 4);
|
181
|
+
}
|
182
|
+
}
|
183
|
+
|
184
|
+
for (int i = 0; i < 16; i++) {
|
185
|
+
// [16, 31], in.d & 0xF0
|
186
|
+
for (int j = 0; j < QK4_0 / 4; j++) {
|
187
|
+
//src [b0 b16] ......... [b8 b24] ......... [b15 b31]
|
188
|
+
//dst [b16 b24] ......... [b23 b31]
|
189
|
+
out.qs[4 * QK4_0 + i * QK4_0 / 4 + j] = ((in[i].qs[j] & 0xF0) >> 4) | (in[i].qs[j + QK4_0 / 4] & 0xF0);
|
190
|
+
}
|
191
|
+
}
|
192
|
+
|
193
|
+
return out;
|
194
|
+
}
|
195
|
+
|
196
|
+
static block_q4_1x16 make_block_q4_1x16(block_q4_1 * in, unsigned int blck_size_interleave) {
|
197
|
+
block_q4_1x16 out;
|
198
|
+
GGML_ASSERT(QK4_1 / blck_size_interleave == 2);
|
199
|
+
|
200
|
+
for (int i = 0; i < 16; i++) {
|
201
|
+
float d = GGML_FP16_TO_FP32(in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d);
|
202
|
+
float m = GGML_FP16_TO_FP32(in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.m);
|
203
|
+
float mid = -std::nearbyintf(m / d);
|
204
|
+
mid = std::min(15.0f, std::max(0.0f, mid));
|
205
|
+
out.d[i] = GGML_FP32_TO_FP16(d);
|
206
|
+
out.zp[i] = static_cast<uint8_t>(mid);
|
207
|
+
}
|
208
|
+
|
209
|
+
for (int i = 0; i < 16; i++) {
|
210
|
+
// [0, 15], in.d & 0x0F
|
211
|
+
for (int j = 0; j < QK4_1 / 4; j++) {
|
212
|
+
//src [b0 b16] ......... [b8 b24] ......... [b15 b31]
|
213
|
+
//dst [b0 b8] ......... [b7 b15]
|
214
|
+
out.qs[i * QK4_1 / 4 + j] = (in[i].qs[j] & 0x0F) | ((in[i].qs[j + QK4_1 / 4] & 0x0F) << 4);
|
215
|
+
}
|
216
|
+
}
|
217
|
+
|
218
|
+
for (int i = 0; i < 16; i++) {
|
219
|
+
// [16, 31], in.d & 0xF0
|
220
|
+
for (int j = 0; j < QK4_1 / 4; j++) {
|
221
|
+
//src [b0 b16] ......... [b8 b24] ......... [b15 b31]
|
222
|
+
//dst [b16 b24] ......... [b23 b31]
|
223
|
+
out.qs[4 * QK4_1 + i * QK4_1 / 4 + j] = ((in[i].qs[j] & 0xF0) >> 4) | (in[i].qs[j + QK4_1 / 4] & 0xF0);
|
224
|
+
}
|
225
|
+
}
|
226
|
+
|
227
|
+
return out;
|
228
|
+
}
|
229
|
+
|
230
|
+
static int repack_q4_0_to_q4_0_16_bl(struct ggml_tensor * t,
|
231
|
+
int interleave_block,
|
232
|
+
const void * GGML_RESTRICT data,
|
233
|
+
size_t data_size) {
|
234
|
+
GGML_ASSERT(t->type == GGML_TYPE_Q4_0);
|
235
|
+
GGML_ASSERT(interleave_block == 16);
|
236
|
+
|
237
|
+
constexpr int nrows_interleaved = 16;
|
238
|
+
|
239
|
+
block_q4_0x16 * dst = (block_q4_0x16 *) t->data;
|
240
|
+
const block_q4_0 * src = (const block_q4_0 *) data;
|
241
|
+
block_q4_0 dst_tmp[16];
|
242
|
+
int nrow = ggml_nrows(t);
|
243
|
+
int nblocks = t->ne[0] / QK4_0;
|
244
|
+
|
245
|
+
GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0));
|
246
|
+
|
247
|
+
if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % QK4_0 != 0) {
|
248
|
+
return -1;
|
249
|
+
}
|
250
|
+
|
251
|
+
for (int b = 0; b < nrow; b += nrows_interleaved) {
|
252
|
+
for (int64_t x = 0; x < nblocks; x++) {
|
253
|
+
for (int i = 0; i < nrows_interleaved; i++) {
|
254
|
+
dst_tmp[i] = src[x + i * nblocks];
|
255
|
+
}
|
256
|
+
*dst++ = make_block_q4_0x16(dst_tmp, interleave_block);
|
257
|
+
}
|
258
|
+
src += nrows_interleaved * nblocks;
|
259
|
+
}
|
260
|
+
return 0;
|
261
|
+
|
262
|
+
GGML_UNUSED(data_size);
|
263
|
+
}
|
264
|
+
|
265
|
+
static int repack_q4_1_to_q4_1_16_bl(struct ggml_tensor * t,
|
266
|
+
int interleave_block,
|
267
|
+
const void * GGML_RESTRICT data,
|
268
|
+
size_t data_size) {
|
269
|
+
GGML_ASSERT(t->type == GGML_TYPE_Q4_1);
|
270
|
+
GGML_ASSERT(interleave_block == 16);
|
271
|
+
|
272
|
+
constexpr int nrows_interleaved = 16;
|
273
|
+
|
274
|
+
block_q4_1x16 * dst = (block_q4_1x16 *) t->data;
|
275
|
+
const block_q4_1 * src = (const block_q4_1 *) data;
|
276
|
+
block_q4_1 dst_tmp[16];
|
277
|
+
int nrow = ggml_nrows(t);
|
278
|
+
int nblocks = t->ne[0] / QK4_1;
|
279
|
+
|
280
|
+
GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_1));
|
281
|
+
|
282
|
+
if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % QK4_1 != 0) {
|
283
|
+
return -1;
|
284
|
+
}
|
285
|
+
|
286
|
+
for (int b = 0; b < nrow; b += nrows_interleaved) {
|
287
|
+
for (int64_t x = 0; x < nblocks; x++) {
|
288
|
+
for (int i = 0; i < nrows_interleaved; i++) {
|
289
|
+
dst_tmp[i] = src[x + i * nblocks];
|
290
|
+
}
|
291
|
+
*dst++ = make_block_q4_1x16(dst_tmp, interleave_block);
|
292
|
+
}
|
293
|
+
src += nrows_interleaved * nblocks;
|
294
|
+
}
|
295
|
+
return 0;
|
296
|
+
|
297
|
+
GGML_UNUSED(data_size);
|
298
|
+
}
|
299
|
+
|
300
|
+
static inline void get_scale_min_k4(int j,
|
301
|
+
const uint8_t * GGML_RESTRICT q,
|
302
|
+
uint8_t * GGML_RESTRICT d,
|
303
|
+
uint8_t * GGML_RESTRICT m) {
|
304
|
+
if (j < 4) {
|
305
|
+
*d = q[j] & 63;
|
306
|
+
*m = q[j + 4] & 63;
|
307
|
+
} else {
|
308
|
+
*d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
|
309
|
+
*m = (q[j + 4] >> 4) | ((q[j - 0] >> 6) << 4);
|
310
|
+
}
|
311
|
+
}
|
312
|
+
|
313
|
+
static int repack_q4_k_to_q4_1_16_bl(struct ggml_tensor * t,
|
314
|
+
int interleave_block,
|
315
|
+
const void * GGML_RESTRICT data,
|
316
|
+
size_t data_size) {
|
317
|
+
GGML_ASSERT(t->type == GGML_TYPE_Q4_K);
|
318
|
+
GGML_ASSERT(interleave_block == 16);
|
319
|
+
GGML_ASSERT(QK_K / QK4_1 == 8);
|
320
|
+
|
321
|
+
constexpr int nrows_interleaved = 16;
|
322
|
+
|
323
|
+
block_q4_1x16 * dst = (block_q4_1x16 *) t->data;
|
324
|
+
const block_q4_K * src = (const block_q4_K *) data;
|
325
|
+
block_q4_1 dst_tmp[16];
|
326
|
+
int nrow = ggml_nrows(t);
|
327
|
+
int nblocks = t->ne[0] / QK_K;
|
328
|
+
|
329
|
+
if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % QK_K != 0) {
|
330
|
+
return -1;
|
331
|
+
}
|
332
|
+
|
333
|
+
for (int b = 0; b < nrow; b += nrows_interleaved) {
|
334
|
+
for (int64_t x = 0; x < nblocks; x++) {
|
335
|
+
for (int j = 0; j < 8; j++) {
|
336
|
+
for (int i = 0; i < nrows_interleaved; i++) {
|
337
|
+
uint8_t sc, m;
|
338
|
+
const float d = GGML_FP16_TO_FP32(src[x + i * nblocks].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d);
|
339
|
+
const float min =
|
340
|
+
GGML_FP16_TO_FP32(src[x + i * nblocks].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.dmin);
|
341
|
+
get_scale_min_k4(j, src[x + i * nblocks].scales, &sc, &m);
|
342
|
+
const float d1 = d * sc;
|
343
|
+
const float m1 = min * m;
|
344
|
+
|
345
|
+
dst_tmp[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d = GGML_FP32_TO_FP16(d1);
|
346
|
+
dst_tmp[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.m = GGML_FP32_TO_FP16(-m1);
|
347
|
+
// src -> [b0, b32] [b1, b33] ... [b31, b63]
|
348
|
+
// dst -> [b0, b16] [b1, b17] ... [b15, b31] [b32, b48] [b33, b49] ... [b47, b63]
|
349
|
+
const uint8_t * q = src[x + i * nblocks].qs + (j / 2) * QK4_1;
|
350
|
+
if (j % 2 == 0) {
|
351
|
+
for (int ii = 0; ii < 16; ii++) {
|
352
|
+
dst_tmp[i].qs[ii] = (q[ii] & 0x0F) | ((q[ii + 16] & 0x0F) << 4);
|
353
|
+
}
|
354
|
+
} else {
|
355
|
+
for (int ii = 0; ii < 16; ii++) {
|
356
|
+
dst_tmp[i].qs[ii] = ((q[ii] & 0xF0) >> 4) | (q[ii + 16] & 0xF0);
|
357
|
+
}
|
358
|
+
}
|
359
|
+
}
|
360
|
+
*dst++ = make_block_q4_1x16(dst_tmp, interleave_block);
|
361
|
+
}
|
362
|
+
}
|
363
|
+
src += nrows_interleaved * nblocks;
|
364
|
+
}
|
365
|
+
return 0;
|
366
|
+
|
367
|
+
GGML_UNUSED(data_size);
|
368
|
+
}
|
369
|
+
|
370
|
+
namespace ggml::cpu::riscv64_spacemit {
|
371
|
+
|
372
|
+
template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS>
|
373
|
+
int repack(struct ggml_tensor *, const void *, size_t);
|
374
|
+
|
375
|
+
template <> int repack<block_q4_0, 8, 16>(struct ggml_tensor * t, const void * data, size_t data_size) {
|
376
|
+
return repack_q4_0_to_q4_0_16_bl(t, 16, data, data_size);
|
377
|
+
}
|
378
|
+
|
379
|
+
template <> int repack<block_q4_1, 8, 16>(struct ggml_tensor * t, const void * data, size_t data_size) {
|
380
|
+
return repack_q4_1_to_q4_1_16_bl(t, 16, data, data_size);
|
381
|
+
}
|
382
|
+
|
383
|
+
template <> int repack<block_q4_K, 8, 16>(struct ggml_tensor * t, const void * data, size_t data_size) {
|
384
|
+
return repack_q4_k_to_q4_1_16_bl(t, 16, data, data_size);
|
385
|
+
}
|
386
|
+
|
387
|
+
class tensor_traits_base : public ggml::cpu::tensor_traits {
|
388
|
+
public:
|
389
|
+
virtual int repack(struct ggml_tensor * t, const void * data, size_t data_size) = 0;
|
390
|
+
};
|
391
|
+
|
392
|
+
template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS> class tensor_traits : public tensor_traits_base {
|
393
|
+
bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override {
|
394
|
+
switch (op->op) {
|
395
|
+
case GGML_OP_MUL_MAT:
|
396
|
+
size = ggml_row_size(GGML_TYPE_Q8_0, ggml_nelements(op->src[1])) * 4;
|
397
|
+
size = ((size + QK4_0 - 1) / QK4_0) * (QK4_0 * sizeof(float) + sizeof(float));
|
398
|
+
return true;
|
399
|
+
default:
|
400
|
+
// GGML_ABORT("fatal error");
|
401
|
+
break;
|
402
|
+
}
|
403
|
+
return false;
|
404
|
+
}
|
405
|
+
|
406
|
+
bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override {
|
407
|
+
switch (op->op) {
|
408
|
+
case GGML_OP_MUL_MAT:
|
409
|
+
if (op->src[0]->type == GGML_TYPE_Q4_0 || //
|
410
|
+
op->src[0]->type == GGML_TYPE_Q4_1 || //
|
411
|
+
op->src[0]->type == GGML_TYPE_Q4_K) {
|
412
|
+
forward_mul_mat_q4(params, op);
|
413
|
+
return true;
|
414
|
+
}
|
415
|
+
default:
|
416
|
+
// GGML_ABORT("fatal error");
|
417
|
+
break;
|
418
|
+
}
|
419
|
+
return false;
|
420
|
+
}
|
421
|
+
|
422
|
+
void forward_mul_mat_q4(ggml_compute_params * params, ggml_tensor * op) {
|
423
|
+
const ggml_tensor * src0 = op->src[0];
|
424
|
+
const ggml_tensor * src1 = op->src[1];
|
425
|
+
ggml_tensor * dst = op;
|
426
|
+
|
427
|
+
GGML_TENSOR_BINARY_OP_LOCALS
|
428
|
+
|
429
|
+
int ith = params->ith;
|
430
|
+
int nth = params->nth;
|
431
|
+
|
432
|
+
[[maybe_unused]] const enum ggml_type type = src0->type;
|
433
|
+
|
434
|
+
void * w_data = (void *) src0->data;
|
435
|
+
const float * feature = (const float *) src1->data;
|
436
|
+
float * output = (float *) dst->data;
|
437
|
+
|
438
|
+
const size_t batch_feature = ne12 * ne13;
|
439
|
+
[[maybe_unused]] const size_t batch_weight = ne02 * ne03;
|
440
|
+
const size_t gemm_m = ne11;
|
441
|
+
const size_t gemm_k = ne10;
|
442
|
+
const size_t gemm_n = ne01;
|
443
|
+
|
444
|
+
GGML_ASSERT(batch_weight == 1);
|
445
|
+
|
446
|
+
const size_t block_count_k = div_round_up(gemm_k, QK4_0);
|
447
|
+
const size_t per_gemm_workspace_size = gemm_m * block_count_k * q8_blk_size(QK4_0);
|
448
|
+
const size_t per_gemm_workspace_stride =
|
449
|
+
div_round_up(per_gemm_workspace_size, alignof(uint64_t)) * alignof(uint64_t);
|
450
|
+
const size_t gemm_workspace_size = batch_feature * per_gemm_workspace_stride;
|
451
|
+
const size_t desired_wsize = gemm_workspace_size + alignof(uint64_t) - 1;
|
452
|
+
|
453
|
+
if (ith == 0 && params->wsize < desired_wsize) {
|
454
|
+
throw std::runtime_error("wsize less than desired_wsize");
|
455
|
+
}
|
456
|
+
|
457
|
+
std::vector<qnbitgemm_spacemit_ime_args> qnbitgemm_args(batch_feature);
|
458
|
+
|
459
|
+
for (size_t i = 0; i < batch_feature; i++) {
|
460
|
+
qnbitgemm_args[i].a_ptr = feature + gemm_m * gemm_k * i;
|
461
|
+
qnbitgemm_args[i].lda = gemm_k;
|
462
|
+
qnbitgemm_args[i].packed_quant_b_data = (const std::byte *) w_data;
|
463
|
+
qnbitgemm_args[i].quant_b_scale = nullptr;
|
464
|
+
|
465
|
+
if constexpr (std::is_same_v<BLOC_TYPE, block_q4_0>) {
|
466
|
+
qnbitgemm_args[i].quant_b_zp = nullptr;
|
467
|
+
} else {
|
468
|
+
qnbitgemm_args[i].quant_b_zp = w_data;
|
469
|
+
}
|
470
|
+
|
471
|
+
qnbitgemm_args[i].bias = nullptr;
|
472
|
+
qnbitgemm_args[i].c_ptr = output + gemm_m * gemm_n * i;
|
473
|
+
qnbitgemm_args[i].ldc = gemm_n;
|
474
|
+
}
|
475
|
+
|
476
|
+
const uintptr_t ws_ptr = reinterpret_cast<uintptr_t>(params->wdata);
|
477
|
+
void * ws = reinterpret_cast<void *>((ws_ptr + alignof(uint64_t) - 1) & (~(alignof(uint64_t) - 1)));
|
478
|
+
const size_t quant_a_stride = block_count_k * q8_blk_size(QK4_0);
|
479
|
+
|
480
|
+
{
|
481
|
+
constexpr size_t block_size_m = 4;
|
482
|
+
size_t per_gemm_block_count_m = div_round_up(gemm_m, block_size_m);
|
483
|
+
int32_t task_count = batch_feature * per_gemm_block_count_m;
|
484
|
+
int32_t task_per_thread = (task_count + nth - 1) / nth;
|
485
|
+
int32_t start = ith * task_per_thread;
|
486
|
+
int32_t end = std::min((ith + 1) * task_per_thread, task_count);
|
487
|
+
for (int32_t compute_idx = start; compute_idx < end; compute_idx++) {
|
488
|
+
int32_t gemm_idx = compute_idx / block_size_m;
|
489
|
+
int32_t m_idx = compute_idx % block_size_m * block_size_m;
|
490
|
+
const qnbitgemm_spacemit_ime_args & data = qnbitgemm_args[gemm_idx];
|
491
|
+
int32_t rows_tobe_handled = (gemm_m - m_idx) > block_size_m ? block_size_m : (gemm_m - m_idx);
|
492
|
+
|
493
|
+
if (rows_tobe_handled == block_size_m) {
|
494
|
+
const float * a_row_ptr = data.a_ptr + m_idx * data.lda;
|
495
|
+
std::byte * quant_a_row_ptr =
|
496
|
+
static_cast<std::byte *>(ws) + gemm_idx * per_gemm_workspace_stride + m_idx * quant_a_stride;
|
497
|
+
sqnbitgemm_spacemit_ime::ime1::quantize_a_4row_i8(QK4_0, a_row_ptr, gemm_k, quant_a_row_ptr);
|
498
|
+
} else {
|
499
|
+
while (rows_tobe_handled) {
|
500
|
+
const float * a_row_ptr = data.a_ptr + m_idx * data.lda;
|
501
|
+
std::byte * quant_a_row_ptr = static_cast<std::byte *>(ws) +
|
502
|
+
gemm_idx * per_gemm_workspace_stride + m_idx * quant_a_stride;
|
503
|
+
sqnbitgemm_spacemit_ime::ime1::quantize_a_row_i8(QK4_0, a_row_ptr, gemm_k, quant_a_row_ptr);
|
504
|
+
rows_tobe_handled -= 1;
|
505
|
+
m_idx += 1;
|
506
|
+
}
|
507
|
+
}
|
508
|
+
}
|
509
|
+
}
|
510
|
+
|
511
|
+
ggml_barrier(params->threadpool);
|
512
|
+
|
513
|
+
if (ith >= ggml::cpu::riscv64_spacemit::num_ai_cores) {
|
514
|
+
return;
|
515
|
+
}
|
516
|
+
nth = std::min(nth, int{ ggml::cpu::riscv64_spacemit::num_ai_cores });
|
517
|
+
|
518
|
+
size_t threads_per_gemm = nth / batch_feature;
|
519
|
+
constexpr size_t gemm_m_stride = 128;
|
520
|
+
size_t nc = gemm_n;
|
521
|
+
const size_t gemm_m_blocked = div_round_up(gemm_m, gemm_m_stride);
|
522
|
+
const size_t max_nc = div_round_up(gemm_n * gemm_m_blocked, threads_per_gemm);
|
523
|
+
if (max_nc < nc) {
|
524
|
+
nc = std::min(nc, div_round_up(max_nc, QGEMM_STRIDEN_THREAD_ALIGN) * QGEMM_STRIDEN_THREAD_ALIGN);
|
525
|
+
}
|
526
|
+
const size_t gemm_n_stride = nc;
|
527
|
+
const size_t thread_count_m = div_round_up(gemm_m, gemm_m_stride);
|
528
|
+
const size_t thread_count_n = div_round_up(gemm_n, gemm_n_stride);
|
529
|
+
threads_per_gemm = thread_count_m * thread_count_n;
|
530
|
+
|
531
|
+
{
|
532
|
+
int task_count = batch_feature * threads_per_gemm;
|
533
|
+
int task_per_thread = (task_count + nth - 1) / nth;
|
534
|
+
int start = ith * task_per_thread;
|
535
|
+
int end = std::min((ith + 1) * task_per_thread, task_count);
|
536
|
+
for (int compute_idx = start; compute_idx < end; compute_idx++) {
|
537
|
+
const auto gemm_i = compute_idx / threads_per_gemm;
|
538
|
+
const auto blk_i = compute_idx % threads_per_gemm;
|
539
|
+
const auto * data = &qnbitgemm_args[gemm_i];
|
540
|
+
|
541
|
+
const auto tid_n = blk_i / thread_count_m;
|
542
|
+
const auto tid_m = blk_i % thread_count_m;
|
543
|
+
|
544
|
+
const size_t m_start = tid_m * gemm_m_stride;
|
545
|
+
const size_t m_count = std::min(gemm_m - m_start, (size_t) gemm_m_stride);
|
546
|
+
|
547
|
+
const size_t n_start = tid_n * gemm_n_stride;
|
548
|
+
const size_t n_count = std::min(gemm_n - n_start, (size_t) gemm_n_stride);
|
549
|
+
|
550
|
+
void * per_gemm_ws = reinterpret_cast<std::byte *>(ws) + gemm_i * per_gemm_workspace_stride;
|
551
|
+
|
552
|
+
sqnbitgemm_spacemit_ime_i8i4(QK4_0, gemm_k, data, per_gemm_ws, m_start, m_count, n_start, n_count);
|
553
|
+
}
|
554
|
+
}
|
555
|
+
}
|
556
|
+
|
557
|
+
    // Repack tensor data into the NB_COLS x INTER_SIZE interleaved layout the
    // spacemit IME GEMM kernels expect; forwards to the namespace-level
    // repack<>() helper. Returns its status (0 on success).
    int repack(struct ggml_tensor * t, const void * data, size_t data_size) override {
        GGML_LOG_DEBUG("%s: repack tensor %s with %s_%dx%d\n", __func__, t->name, ggml_type_name(t->type),
                       (int) NB_COLS, (int) INTER_SIZE);
        return ggml::cpu::riscv64_spacemit::repack<BLOC_TYPE, INTER_SIZE, NB_COLS>(t, data, data_size);
    }
|
562
|
+
};
|
563
|
+
|
564
|
+
// Traits implementation for non-quantized (F32) tensors: RVV-vectorized NORM
// and RMS_NORM kernels, plus a pass-through repack (plain memcpy).
class tensor_traits_common : public tensor_traits_base {
    // NORM / RMS_NORM need no scratch workspace.
    bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override {
        switch (op->op) {
            case GGML_OP_NORM:
            case GGML_OP_RMS_NORM:
                size = 0;
                return true;
            default:
                // GGML_ABORT("fatal error");
                break;
        }
        return false;
    }

    // Dispatch to the matching kernel; returns false for unsupported ops so the
    // generic CPU path can handle them instead.
    bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override {
        switch (op->op) {
            case GGML_OP_NORM:
                forward_norm_f32(params, op);
                return true;
            case GGML_OP_RMS_NORM:
                forward_rms_norm_f32(params, op);
                return true;
            default:
                // GGML_ABORT("fatal error");
                break;
        }
        return false;
    }

    // Layer norm over the innermost dimension (ne00) of an F32 tensor using RVV
    // LMUL=4 vectors. The ne01*ne02*ne03 rows are split evenly across threads;
    // each row is processed in two passes (accumulate, then normalize).
    void forward_norm_f32(ggml_compute_params * params, ggml_tensor * op) {
        const ggml_tensor * src0 = op->src[0];
        ggml_tensor * dst = op;
        GGML_ASSERT(ggml_are_same_shape(src0, dst));
        GGML_ASSERT(src0->nb[0] == sizeof(float));

        const int ith = params->ith;
        const int nth = params->nth;

        GGML_TENSOR_UNARY_OP_LOCALS

        // eps is stored as the first float of op_params
        float epsilon;
        memcpy(&epsilon, dst->op_params, sizeof(float));

        GGML_ASSERT(epsilon > 0.0f);

        auto * input = (float *) src0->data;
        auto * output = (float *) dst->data;

        const auto hidden_size = ne00;
        const auto task_count = ne01 * ne02 * ne03;  // one task per row
        const auto task_per_thread = (task_count + nth - 1) / nth;

        const auto task_begin = ith * task_per_thread;
        const auto task_end = std::min((ith + 1) * task_per_thread, task_count);

        for (auto task_idx = task_begin; task_idx < task_end; task_idx++) {
            auto offset = task_idx * hidden_size;
            auto * p_input = const_cast<float *>(input + offset);

            auto * p_output = output + offset;
            auto * p_temp_output = p_output;
            // NOTE(review): gamma/beta are always nullptr in this code path, so
            // only the first branch of the normalize pass below ever executes;
            // the gamma/beta branches look kept for a future fused variant —
            // confirm before removing.
            auto * p_gamma_data = (const float *) nullptr;
            auto * p_beta_data = (const float *) nullptr;
            size_t gvl = __riscv_vsetvlmax_e32m4();
            vfloat32m4_t sum = __riscv_vfmv_v_f_f32m4(0.f, gvl);
            vfloat32m4_t sum_sq = __riscv_vfmv_v_f_f32m4(0.f, gvl);
            int64_t length = hidden_size;
            // pass 1: accumulate per-lane sum and sum-of-squares, staging the
            // input row into the output buffer
            while (length > 0) {
                gvl = __riscv_vsetvl_e32m4(length);
                // load data
                vfloat32m4_t src_data = __riscv_vle32_v_f32m4(p_input, gvl);

                sum = __riscv_vfadd_vv_f32m4(sum, src_data, gvl);
                sum_sq = __riscv_vfmacc_vv_f32m4(sum_sq, src_data, src_data, gvl);

                __riscv_vse32_v_f32m4(p_temp_output, src_data, gvl);

                p_input += gvl;
                p_temp_output += gvl;
                length -= gvl;
            }

            gvl = __riscv_vsetvlmax_e32m1();

            // horizontal reduction: fold the four m1 parts of `sum` together,
            // reduce to a scalar, then divide by the row length to get the mean
            float mean = 0.f;
            vfloat32m1_t zero_v = __riscv_vfmv_v_f_f32m1(0.f, gvl);
            vfloat32m1_t mean_v =
                __riscv_vfadd_vv_f32m1(__riscv_vget_v_f32m4_f32m1(sum, 0), __riscv_vget_v_f32m4_f32m1(sum, 1), gvl);
            mean_v = __riscv_vfadd_vv_f32m1(mean_v, __riscv_vget_v_f32m4_f32m1(sum, 2), gvl);
            mean_v = __riscv_vfadd_vv_f32m1(mean_v, __riscv_vget_v_f32m4_f32m1(sum, 3), gvl);
            mean_v = __riscv_vfredusum_vs_f32m1_f32m1(mean_v, zero_v, gvl);
            mean = __riscv_vfmv_f_s_f32m1_f32(mean_v);
            mean /= hidden_size;

            // same reduction for the sum of squares -> E[x^2]
            vfloat32m1_t mean_square_v = __riscv_vfadd_vv_f32m1(__riscv_vget_v_f32m4_f32m1(sum_sq, 0),
                                                                __riscv_vget_v_f32m4_f32m1(sum_sq, 1), gvl);
            mean_square_v = __riscv_vfadd_vv_f32m1(mean_square_v, __riscv_vget_v_f32m4_f32m1(sum_sq, 2), gvl);
            mean_square_v = __riscv_vfadd_vv_f32m1(mean_square_v, __riscv_vget_v_f32m4_f32m1(sum_sq, 3), gvl);
            mean_square_v = __riscv_vfredusum_vs_f32m1_f32m1(mean_square_v, zero_v, gvl);

            float mean_square = __riscv_vfmv_f_s_f32m1_f32(mean_square_v);
            mean_square /= hidden_size;
            // var = E[x^2] - E[x]^2; mean_square becomes 1/sqrt(var + eps)
            mean_square = sqrt(mean_square - mean * mean + epsilon);

            mean_square = 1.0f / mean_square;
            length = hidden_size;
            p_temp_output = p_output;

            // pass 2: write (x - mean) * inv_std, optionally scaled by gamma
            // and shifted by beta (dead branches today — see NOTE above)
            if (p_gamma_data == nullptr && p_beta_data == nullptr) {
                while (length > 0) {
                    gvl = __riscv_vsetvl_e32m4(length);
                    vfloat32m4_t src_data = __riscv_vle32_v_f32m4(p_temp_output, gvl);
                    src_data = __riscv_vfsub_vf_f32m4(src_data, mean, gvl);
                    src_data = __riscv_vfmul_vf_f32m4(src_data, mean_square, gvl);
                    __riscv_vse32_v_f32m4(p_output, src_data, gvl);
                    p_temp_output += gvl;
                    p_output += gvl;
                    length -= gvl;
                }
            } else if (p_beta_data == nullptr) {
                // gamma only
                while (length > 0) {
                    gvl = __riscv_vsetvl_e32m4(length);
                    vfloat32m4_t src_data = __riscv_vle32_v_f32m4(p_temp_output, gvl);
                    vfloat32m4_t gamma_data_v = __riscv_vle32_v_f32m4(p_gamma_data, gvl);
                    src_data = __riscv_vfsub_vf_f32m4(src_data, mean, gvl);
                    src_data = __riscv_vfmul_vf_f32m4(src_data, mean_square, gvl);
                    src_data = __riscv_vfmul_vv_f32m4(src_data, gamma_data_v, gvl);
                    __riscv_vse32_v_f32m4(p_output, src_data, gvl);
                    p_temp_output += gvl;
                    p_output += gvl;
                    p_gamma_data += gvl;
                    length -= gvl;
                }
            } else if (p_gamma_data != nullptr) {
                // gamma and beta
                while (length > 0) {
                    gvl = __riscv_vsetvl_e32m4(length);
                    vfloat32m4_t src_data = __riscv_vle32_v_f32m4(p_temp_output, gvl);
                    vfloat32m4_t gamma_data_v = __riscv_vle32_v_f32m4(p_gamma_data, gvl);
                    src_data = __riscv_vfsub_vf_f32m4(src_data, mean, gvl);
                    src_data = __riscv_vfmul_vf_f32m4(src_data, mean_square, gvl);
                    src_data = __riscv_vfmul_vv_f32m4(src_data, gamma_data_v, gvl);
                    vfloat32m4_t beta_data_v = __riscv_vle32_v_f32m4(p_beta_data, gvl);
                    src_data = __riscv_vfadd_vv_f32m4(src_data, beta_data_v, gvl);
                    p_beta_data += gvl;
                    __riscv_vse32_v_f32m4(p_output, src_data, gvl);
                    p_temp_output += gvl;
                    p_output += gvl;
                    p_gamma_data += gvl;
                    length -= gvl;
                }
            }
        }
    }

    // RMS norm over the innermost dimension (ne00): identical structure to
    // forward_norm_f32 but without mean subtraction — scale is 1/sqrt(E[x^2]+eps).
    void forward_rms_norm_f32(ggml_compute_params * params, ggml_tensor * op) {
        const ggml_tensor * src0 = op->src[0];
        ggml_tensor * dst = op;
        GGML_ASSERT(ggml_are_same_shape(src0, dst));
        GGML_ASSERT(src0->nb[0] == sizeof(float));

        const int ith = params->ith;
        const int nth = params->nth;

        GGML_TENSOR_UNARY_OP_LOCALS

        // eps is stored as the first float of op_params
        float epsilon;
        memcpy(&epsilon, dst->op_params, sizeof(float));

        GGML_ASSERT(epsilon > 0.0f);

        auto * input = (float *) src0->data;
        auto * output = (float *) dst->data;

        const auto hidden_size = ne00;
        const auto task_count = ne01 * ne02 * ne03;  // one task per row
        const auto task_per_thread = (task_count + nth - 1) / nth;

        const auto task_begin = ith * task_per_thread;
        const auto task_end = std::min((ith + 1) * task_per_thread, task_count);

        for (auto task_idx = task_begin; task_idx < task_end; task_idx++) {
            auto offset = task_idx * hidden_size;
            auto * p_input = const_cast<float *>(input + offset);
            auto * p_output = output + offset;
            auto * p_temp_output = p_output;
            // NOTE(review): gamma/beta are always nullptr here (see
            // forward_norm_f32); only the first normalize branch ever runs.
            auto * p_gamma_data = (const float *) nullptr;
            auto * p_beta_data = (const float *) nullptr;

            size_t gvl = __riscv_vsetvlmax_e32m4();
            // vfloat32m4_t sum = __riscv_vfmv_v_f_f32m4(0.f, gvl);
            vfloat32m4_t sum_sq = __riscv_vfmv_v_f_f32m4(0.f, gvl);
            int64_t length = hidden_size;
            // pass 1: accumulate sum of squares, staging the input row into
            // the output buffer
            while (length > 0) {
                gvl = __riscv_vsetvl_e32m4(length);
                // load data
                vfloat32m4_t src_data = __riscv_vle32_v_f32m4(p_input, gvl);

                sum_sq = __riscv_vfmacc_vv_f32m4(sum_sq, src_data, src_data, gvl);

                __riscv_vse32_v_f32m4(p_temp_output, src_data, gvl);

                p_input += gvl;
                p_temp_output += gvl;
                length -= gvl;
            }

            gvl = __riscv_vsetvlmax_e32m1();

            // float mean = 0.f;
            vfloat32m1_t zero_v = __riscv_vfmv_v_f_f32m1(0.f, gvl);

            // horizontal reduction of sum_sq -> E[x^2]
            vfloat32m1_t mean_square_v = __riscv_vfadd_vv_f32m1(__riscv_vget_v_f32m4_f32m1(sum_sq, 0),
                                                                __riscv_vget_v_f32m4_f32m1(sum_sq, 1), gvl);
            mean_square_v = __riscv_vfadd_vv_f32m1(mean_square_v, __riscv_vget_v_f32m4_f32m1(sum_sq, 2), gvl);
            mean_square_v = __riscv_vfadd_vv_f32m1(mean_square_v, __riscv_vget_v_f32m4_f32m1(sum_sq, 3), gvl);
            mean_square_v = __riscv_vfredusum_vs_f32m1_f32m1(mean_square_v, zero_v, gvl);

            float mean_square = __riscv_vfmv_f_s_f32m1_f32(mean_square_v);
            mean_square /= hidden_size;

            // mean_square becomes 1/sqrt(E[x^2] + eps)
            mean_square = sqrt(mean_square + epsilon);

            mean_square = 1.0f / mean_square;
            length = hidden_size;
            p_temp_output = p_output;

            // pass 2: write x * inv_rms, optionally scaled/shifted by the
            // (currently always-null) gamma/beta pointers
            if (p_gamma_data == nullptr && p_beta_data == nullptr) {
                while (length > 0) {
                    gvl = __riscv_vsetvl_e32m4(length);
                    vfloat32m4_t src_data = __riscv_vle32_v_f32m4(p_temp_output, gvl);
                    src_data = __riscv_vfmul_vf_f32m4(src_data, mean_square, gvl);
                    __riscv_vse32_v_f32m4(p_output, src_data, gvl);
                    p_temp_output += gvl;
                    p_output += gvl;
                    length -= gvl;
                }
            } else if (p_beta_data == nullptr) {
                // gamma only
                while (length > 0) {
                    gvl = __riscv_vsetvl_e32m4(length);
                    vfloat32m4_t src_data = __riscv_vle32_v_f32m4(p_temp_output, gvl);
                    vfloat32m4_t gamma_data_v = __riscv_vle32_v_f32m4(p_gamma_data, gvl);
                    src_data = __riscv_vfmul_vf_f32m4(src_data, mean_square, gvl);
                    src_data = __riscv_vfmul_vv_f32m4(src_data, gamma_data_v, gvl);
                    __riscv_vse32_v_f32m4(p_output, src_data, gvl);
                    p_temp_output += gvl;
                    p_output += gvl;
                    p_gamma_data += gvl;
                    length -= gvl;
                }
            } else if (p_gamma_data != nullptr) {
                // gamma and beta
                while (length > 0) {
                    gvl = __riscv_vsetvl_e32m4(length);
                    vfloat32m4_t src_data = __riscv_vle32_v_f32m4(p_temp_output, gvl);
                    vfloat32m4_t gamma_data_v = __riscv_vle32_v_f32m4(p_gamma_data, gvl);
                    src_data = __riscv_vfmul_vf_f32m4(src_data, mean_square, gvl);
                    src_data = __riscv_vfmul_vv_f32m4(src_data, gamma_data_v, gvl);
                    vfloat32m4_t beta_data_v = __riscv_vle32_v_f32m4(p_beta_data, gvl);
                    src_data = __riscv_vfadd_vv_f32m4(src_data, beta_data_v, gvl);
                    p_beta_data += gvl;
                    __riscv_vse32_v_f32m4(p_output, src_data, gvl);
                    p_temp_output += gvl;
                    p_output += gvl;
                    p_gamma_data += gvl;
                    length -= gvl;
                }
            }
        }
    }

    // F32 tensors need no repacking: copy the data through unchanged.
    int repack(struct ggml_tensor * t, const void * data, size_t data_size) override {
        memcpy(t->data, data, data_size);
        return 0;
    }
};
|
838
|
+
|
839
|
+
// Singleton traits instances, one per supported repacked quantization layout
// (INTER_SIZE = 8, NB_COLS = 16), plus the plain RVV implementation used for
// F32 NORM / RMS_NORM.
static const tensor_traits<block_q4_0, 8, 16> q4_0_16x8_q8_0;
static const tensor_traits<block_q4_1, 8, 16> q4_1_16x8_q8_0;
static const tensor_traits<block_q4_K, 8, 16> q4_k_16x8_q8_0;
static const tensor_traits_common rvv_impl;
|
843
|
+
|
844
|
+
} // namespace ggml::cpu::riscv64_spacemit
|
845
|
+
|
846
|
+
// Select the traits implementation for a tensor, or nullptr when no
// riscv64-spacemit specialization applies:
//  - Q4_0 / Q4_1 / Q4_K weights whose row count (ne[1]) is a multiple of 16
//    get the matching 16x8 repacked GEMM traits;
//  - F32 tensors get the plain RVV implementation (NORM / RMS_NORM kernels).
static const ggml::cpu::tensor_traits * ggml_riscv64_spacemit_get_optimal_repack_type(const struct ggml_tensor * cur) {
    const bool rows_divisible_by_16 = cur->ne[1] % 16 == 0;

    switch (cur->type) {
        case GGML_TYPE_Q4_0:
            return rows_divisible_by_16 ? &ggml::cpu::riscv64_spacemit::q4_0_16x8_q8_0 : nullptr;
        case GGML_TYPE_Q4_1:
            return rows_divisible_by_16 ? &ggml::cpu::riscv64_spacemit::q4_1_16x8_q8_0 : nullptr;
        case GGML_TYPE_Q4_K:
            return rows_divisible_by_16 ? &ggml::cpu::riscv64_spacemit::q4_k_16x8_q8_0 : nullptr;
        case GGML_TYPE_F32:
            return &ggml::cpu::riscv64_spacemit::rvv_impl;
        default:
            return nullptr;
    }
}
|
865
|
+
|
866
|
+
// Attach the optimal traits implementation (if any) to the tensor via
// tensor->extra so later set_tensor / compute dispatch can find it.
static enum ggml_status ggml_backend_riscv64_spacemit_buffer_init_tensor(ggml_backend_buffer_t buffer,
                                                                         struct ggml_tensor * tensor) {
    const ggml::cpu::tensor_traits * traits = ggml_riscv64_spacemit_get_optimal_repack_type(tensor);

    // extra is a non-const void*; the traits objects themselves are immutable
    tensor->extra = (void *) const_cast<ggml::cpu::tensor_traits *>(traits);

    GGML_UNUSED(buffer);

    return GGML_STATUS_SUCCESS;
}
|
875
|
+
|
876
|
+
// Copy data into a tensor of this buffer type: only whole-tensor writes are
// supported, and the data is routed through the traits' repack() so it lands
// in the interleaved layout the kernels expect.
static void ggml_backend_riscv64_spacemit_buffer_set_tensor(ggml_backend_buffer_t buffer,
                                                            struct ggml_tensor * tensor,
                                                            const void * data,
                                                            size_t offset,
                                                            size_t size) {
    GGML_ASSERT(offset == 0);
    GGML_ASSERT(size == ggml_nbytes(tensor));

    auto * traits = (ggml::cpu::riscv64_spacemit::tensor_traits_base *) tensor->extra;
    if (traits != nullptr) {
        auto status = traits->repack(tensor, data, size);
        GGML_ASSERT(status == 0);
    }

    GGML_UNUSED(buffer);
}
|
892
|
+
|
893
|
+
// Human-readable name of this buffer type.
static const char * ggml_backend_cpu_riscv64_spacemit_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    return "CPU_RISCV64_SPACEMIT";

    GGML_UNUSED(buft);
}
|
898
|
+
|
899
|
+
// Allocate a buffer of this type by delegating to the generic CPU buffer
// type, then override the interface hooks that must go through the
// repacking path (init_tensor / set_tensor). get_tensor and cpy_tensor are
// disabled because the stored layout differs from the logical one.
static ggml_backend_buffer_t ggml_backend_cpu_riscv64_spacemit_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft,
                                                                                        size_t size) {
    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
    if (!buffer) {
        return nullptr;
    }

    buffer->buft              = buft;
    buffer->iface.init_tensor = ggml_backend_riscv64_spacemit_buffer_init_tensor;
    buffer->iface.set_tensor  = ggml_backend_riscv64_spacemit_buffer_set_tensor;
    buffer->iface.get_tensor  = nullptr;
    buffer->iface.cpy_tensor  = nullptr;

    return buffer;
}
|
914
|
+
|
915
|
+
// All buffers of this type are aligned to 64 bytes.
static size_t ggml_backend_cpu_riscv64_spacemit_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return 64;

    GGML_UNUSED(buft);
}
|
920
|
+
|
921
|
+
// Compute the allocation size for `tensor` in this buffer type, accounting
// for the repacked storage layout (used as the buffer type's get_alloc_size).
static size_t ggml_backend_cpu_riscv64_spacemit_nbytes(ggml_backend_buffer_type_t buft,
                                                       const struct ggml_tensor * tensor) {
    // degenerate (empty or invalid) shapes take no storage
    for (int i = 0; i < GGML_MAX_DIMS; ++i) {
        if (tensor->ne[i] <= 0) {
            return 0;
        }
    }

    size_t nbytes;
    const size_t blck_size = ggml_blck_size(tensor->type);
    if (blck_size == 1) {
        // non-blocked types: classic ggml_nbytes formula based on strides
        nbytes = ggml_type_size(tensor->type);
        for (int i = 0; i < GGML_MAX_DIMS; ++i) {
            nbytes += (tensor->ne[i] - 1) * tensor->nb[i];
        }
    } else {
        // blocked (quantized) types: row 0 contribution first
        nbytes = tensor->ne[0] * tensor->nb[0] / blck_size;
        if (tensor->type == GGML_TYPE_Q4_K) {
            // Q4_K is stored repacked as 8 block_q4_1 per block_q4_K, so every
            // per-dimension contribution is rescaled by that ratio.
            GGML_ASSERT(nbytes % sizeof(block_q4_K) == 0);
            nbytes = (nbytes / sizeof(block_q4_K)) * sizeof(block_q4_1) * 8;
            for (int i = 1; i < GGML_MAX_DIMS; ++i) {
                nbytes += (tensor->ne[i] - 1) * (tensor->nb[i] / sizeof(block_q4_K)) * sizeof(block_q4_1) * 8;
            }
        } else {
            for (int i = 1; i < GGML_MAX_DIMS; ++i) {
                nbytes += (tensor->ne[i] - 1) * tensor->nb[i];
            }
        }
    }

    GGML_UNUSED(buft);
    return nbytes;
}
|
954
|
+
|
955
|
+
namespace ggml::cpu::riscv64_spacemit {

// Extra-buffer-type hook: tells the CPU backend which ops this buffer type
// accelerates and which traits object implements each of them.
class extra_buffer_type : ggml::cpu::extra_buffer_type {
    bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override {
        switch (op->op) {
            case GGML_OP_MUL_MAT:
                // accelerate only 2-D weights that live in this buffer type
                // and have a repack specialization ...
                if (op->src[0]->buffer && (ggml_n_dims(op->src[0]) == 2) &&
                    op->src[0]->buffer->buft == ggml_backend_cpu_riscv64_spacemit_buffer_type() &&
                    ggml_riscv64_spacemit_get_optimal_repack_type(op->src[0])) {
                    // ... with host-resident activations ...
                    if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) {
                        return false;
                    }
                    // ... of type F32
                    if (op->src[1]->type == GGML_TYPE_F32) {
                        return true;
                    }
                }
                break;
            case GGML_OP_NORM:
            case GGML_OP_RMS_NORM:
                if (op->src[0]->type == GGML_TYPE_F32) {
                    return true;
                }
                break;
            default:
                // GGML_ABORT("fatal error");
                break;
        }
        return false;
    }

    ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override {
        switch (op->op) {
            case GGML_OP_MUL_MAT:
                // MUL_MAT traits were attached to the weight tensor's extra
                // field at buffer init time
                if (op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_cpu_riscv64_spacemit_buffer_type()) {
                    return (ggml::cpu::tensor_traits *) op->src[0]->extra;
                }
                break;
            case GGML_OP_NORM:
            case GGML_OP_RMS_NORM:
                // the norm kernels are stateless: always use the shared RVV impl
                return (ggml::cpu::tensor_traits *) (&ggml::cpu::riscv64_spacemit::rvv_impl);
            default:
                // GGML_ABORT("fatal error");
                break;
        }

        return nullptr;
    }
};

} // namespace ggml::cpu::riscv64_spacemit
|
1005
|
+
|
1006
|
+
// Public entry point: returns the process-wide singleton buffer type for the
// riscv64-spacemit repacked CPU path.
ggml_backend_buffer_type_t ggml_backend_cpu_riscv64_spacemit_buffer_type(void) {
    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_riscv64_spacemit = {
        /* .iface = */
        {
            /* .get_name       = */ ggml_backend_cpu_riscv64_spacemit_buffer_type_get_name,
            /* .alloc_buffer   = */ ggml_backend_cpu_riscv64_spacemit_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_cpu_riscv64_spacemit_buffer_type_get_alignment,
            /* .get_max_size   = */ nullptr,
            /* .get_alloc_size = */ ggml_backend_cpu_riscv64_spacemit_nbytes,
            /* .is_host        = */ nullptr,
        },
        /* .device = */
        ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
        /* .context = */
        new ggml::cpu::riscv64_spacemit::extra_buffer_type(),
    };

    return &ggml_backend_cpu_buffer_type_riscv64_spacemit;
}
|