whispercpp 1.3.0 → 1.3.2
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registry.
- checksums.yaml +4 -4
- data/.gitignore +6 -0
- data/LICENSE +1 -1
- data/README.md +216 -424
- data/Rakefile +79 -11
- data/ext/.gitignore +11 -0
- data/ext/dependencies.rb +61 -0
- data/ext/extconf.rb +18 -26
- data/ext/options.rb +221 -0
- data/ext/ruby_whisper.c +159 -0
- data/ext/ruby_whisper.h +27 -2
- data/ext/ruby_whisper_context.c +641 -0
- data/ext/ruby_whisper_error.c +52 -0
- data/ext/ruby_whisper_model.c +232 -0
- data/ext/ruby_whisper_params.c +1301 -0
- data/ext/ruby_whisper_segment.c +143 -0
- data/ext/ruby_whisper_transcribe.cpp +87 -0
- data/ext/ruby_whisper_vad_params.c +288 -0
- data/ext/sources/.dockerignore +3 -0
- data/ext/sources/.github/workflows/bindings-ruby.yml +21 -0
- data/ext/sources/CMakeGraphVizOptions.cmake +8 -0
- data/ext/sources/CMakeLists.txt +251 -0
- data/ext/sources/bindings/javascript/CMakeLists.txt +41 -0
- data/ext/sources/bindings/javascript/emscripten.cpp +93 -0
- data/ext/sources/bindings/javascript/libwhisper.worker.js +1 -0
- data/ext/sources/bindings/javascript/package-tmpl.json +26 -0
- data/ext/sources/bindings/javascript/package.json +26 -0
- data/ext/sources/bindings/javascript/whisper.js +19 -0
- data/ext/sources/build-xcframework.sh +547 -0
- data/ext/sources/ci/run.sh +336 -0
- data/ext/sources/close-issue.yml +28 -0
- data/ext/sources/cmake/DefaultTargetOptions.cmake +16 -0
- data/ext/sources/cmake/FindFFmpeg.cmake +163 -0
- data/ext/sources/cmake/build-info.cmake +60 -0
- data/ext/sources/cmake/git-vars.cmake +22 -0
- data/ext/sources/cmake/whisper-config.cmake.in +65 -0
- data/ext/sources/cmake/whisper.pc.in +10 -0
- data/ext/sources/examples/CMakeLists.txt +124 -0
- data/ext/sources/examples/addon.node/CMakeLists.txt +31 -0
- data/ext/sources/examples/addon.node/__test__/whisper.spec.js +37 -0
- data/ext/sources/examples/addon.node/addon.cpp +438 -0
- data/ext/sources/examples/addon.node/index.js +54 -0
- data/ext/sources/examples/addon.node/package.json +16 -0
- data/ext/sources/examples/bench/CMakeLists.txt +8 -0
- data/ext/sources/examples/bench/bench.cpp +175 -0
- data/ext/sources/examples/bench.wasm/CMakeLists.txt +49 -0
- data/ext/sources/examples/bench.wasm/emscripten.cpp +87 -0
- data/ext/sources/examples/bench.wasm/index-tmpl.html +284 -0
- data/ext/sources/examples/cli/CMakeLists.txt +8 -0
- data/ext/sources/examples/cli/cli.cpp +1294 -0
- data/ext/sources/examples/coi-serviceworker.js +146 -0
- data/ext/sources/examples/command/CMakeLists.txt +10 -0
- data/ext/sources/examples/command/command.cpp +776 -0
- data/ext/sources/examples/command/commands.txt +9 -0
- data/ext/sources/examples/command.wasm/CMakeLists.txt +50 -0
- data/ext/sources/examples/command.wasm/emscripten.cpp +327 -0
- data/ext/sources/examples/command.wasm/index-tmpl.html +414 -0
- data/ext/sources/examples/common-ggml.cpp +238 -0
- data/ext/sources/examples/common-ggml.h +18 -0
- data/ext/sources/examples/common-sdl.cpp +227 -0
- data/ext/sources/examples/common-sdl.h +49 -0
- data/ext/sources/examples/common-whisper.cpp +168 -0
- data/ext/sources/examples/common-whisper.h +24 -0
- data/ext/sources/examples/common.cpp +675 -0
- data/ext/sources/examples/common.h +322 -0
- data/ext/sources/examples/deprecation-warning/CMakeLists.txt +6 -0
- data/ext/sources/examples/deprecation-warning/deprecation-warning.cpp +38 -0
- data/ext/sources/examples/ffmpeg-transcode.cpp +368 -0
- data/ext/sources/examples/generate-karaoke.sh +57 -0
- data/ext/sources/examples/grammar-parser.cpp +423 -0
- data/ext/sources/examples/grammar-parser.h +29 -0
- data/ext/sources/examples/helpers.js +191 -0
- data/ext/sources/examples/json.hpp +24596 -0
- data/ext/sources/examples/livestream.sh +112 -0
- data/ext/sources/examples/lsp/CMakeLists.txt +9 -0
- data/ext/sources/examples/lsp/lsp.cpp +467 -0
- data/ext/sources/examples/lsp/whisper.vim +362 -0
- data/ext/sources/examples/miniaudio.h +93468 -0
- data/ext/sources/examples/python/test_whisper_processor.py +7 -0
- data/ext/sources/examples/python/whisper_processor.py +54 -0
- data/ext/sources/examples/quantize/CMakeLists.txt +6 -0
- data/ext/sources/examples/quantize/quantize.cpp +223 -0
- data/ext/sources/examples/server/CMakeLists.txt +12 -0
- data/ext/sources/examples/server/bench.js +29 -0
- data/ext/sources/examples/server/httplib.h +10497 -0
- data/ext/sources/examples/server/server.cpp +1091 -0
- data/ext/sources/examples/server.py +115 -0
- data/ext/sources/examples/stb_vorbis.c +5584 -0
- data/ext/sources/examples/stream/CMakeLists.txt +10 -0
- data/ext/sources/examples/stream/stream.cpp +429 -0
- data/ext/sources/examples/stream.wasm/CMakeLists.txt +49 -0
- data/ext/sources/examples/stream.wasm/emscripten.cpp +216 -0
- data/ext/sources/examples/stream.wasm/index-tmpl.html +414 -0
- data/ext/sources/examples/sycl/CMakeLists.txt +9 -0
- data/ext/sources/examples/sycl/build.sh +22 -0
- data/ext/sources/examples/sycl/ls-sycl-device.cpp +11 -0
- data/ext/sources/examples/sycl/run-whisper.sh +17 -0
- data/ext/sources/examples/talk-llama/CMakeLists.txt +40 -0
- data/ext/sources/examples/talk-llama/eleven-labs.py +80 -0
- data/ext/sources/examples/talk-llama/llama-adapter.cpp +388 -0
- data/ext/sources/examples/talk-llama/llama-adapter.h +76 -0
- data/ext/sources/examples/talk-llama/llama-arch.cpp +1746 -0
- data/ext/sources/examples/talk-llama/llama-arch.h +437 -0
- data/ext/sources/examples/talk-llama/llama-batch.cpp +374 -0
- data/ext/sources/examples/talk-llama/llama-batch.h +89 -0
- data/ext/sources/examples/talk-llama/llama-chat.cpp +663 -0
- data/ext/sources/examples/talk-llama/llama-chat.h +58 -0
- data/ext/sources/examples/talk-llama/llama-context.cpp +2676 -0
- data/ext/sources/examples/talk-llama/llama-context.h +276 -0
- data/ext/sources/examples/talk-llama/llama-cparams.cpp +5 -0
- data/ext/sources/examples/talk-llama/llama-cparams.h +41 -0
- data/ext/sources/examples/talk-llama/llama-grammar.cpp +1229 -0
- data/ext/sources/examples/talk-llama/llama-grammar.h +173 -0
- data/ext/sources/examples/talk-llama/llama-graph.cpp +1618 -0
- data/ext/sources/examples/talk-llama/llama-graph.h +640 -0
- data/ext/sources/examples/talk-llama/llama-hparams.cpp +95 -0
- data/ext/sources/examples/talk-llama/llama-hparams.h +190 -0
- data/ext/sources/examples/talk-llama/llama-impl.cpp +167 -0
- data/ext/sources/examples/talk-llama/llama-impl.h +61 -0
- data/ext/sources/examples/talk-llama/llama-io.cpp +15 -0
- data/ext/sources/examples/talk-llama/llama-io.h +35 -0
- data/ext/sources/examples/talk-llama/llama-kv-cache.cpp +2739 -0
- data/ext/sources/examples/talk-llama/llama-kv-cache.h +502 -0
- data/ext/sources/examples/talk-llama/llama-kv-cells.h +379 -0
- data/ext/sources/examples/talk-llama/llama-memory.cpp +1 -0
- data/ext/sources/examples/talk-llama/llama-memory.h +32 -0
- data/ext/sources/examples/talk-llama/llama-mmap.cpp +600 -0
- data/ext/sources/examples/talk-llama/llama-mmap.h +68 -0
- data/ext/sources/examples/talk-llama/llama-model-loader.cpp +1138 -0
- data/ext/sources/examples/talk-llama/llama-model-loader.h +169 -0
- data/ext/sources/examples/talk-llama/llama-model-saver.cpp +281 -0
- data/ext/sources/examples/talk-llama/llama-model-saver.h +37 -0
- data/ext/sources/examples/talk-llama/llama-model.cpp +13814 -0
- data/ext/sources/examples/talk-llama/llama-model.h +425 -0
- data/ext/sources/examples/talk-llama/llama-quant.cpp +966 -0
- data/ext/sources/examples/talk-llama/llama-quant.h +1 -0
- data/ext/sources/examples/talk-llama/llama-sampling.cpp +2575 -0
- data/ext/sources/examples/talk-llama/llama-sampling.h +32 -0
- data/ext/sources/examples/talk-llama/llama-vocab.cpp +3340 -0
- data/ext/sources/examples/talk-llama/llama-vocab.h +131 -0
- data/ext/sources/examples/talk-llama/llama.cpp +354 -0
- data/ext/sources/examples/talk-llama/llama.h +1377 -0
- data/ext/sources/examples/talk-llama/prompts/talk-alpaca.txt +23 -0
- data/ext/sources/examples/talk-llama/speak +40 -0
- data/ext/sources/examples/talk-llama/speak.bat +1 -0
- data/ext/sources/examples/talk-llama/speak.ps1 +14 -0
- data/ext/sources/examples/talk-llama/talk-llama.cpp +808 -0
- data/ext/sources/examples/talk-llama/unicode-data.cpp +7034 -0
- data/ext/sources/examples/talk-llama/unicode-data.h +20 -0
- data/ext/sources/examples/talk-llama/unicode.cpp +849 -0
- data/ext/sources/examples/talk-llama/unicode.h +66 -0
- data/ext/sources/examples/vad-speech-segments/CMakeLists.txt +8 -0
- data/ext/sources/examples/vad-speech-segments/speech.cpp +143 -0
- data/ext/sources/examples/wchess/CMakeLists.txt +10 -0
- data/ext/sources/examples/wchess/libwchess/CMakeLists.txt +19 -0
- data/ext/sources/examples/wchess/libwchess/Chessboard.cpp +803 -0
- data/ext/sources/examples/wchess/libwchess/Chessboard.h +33 -0
- data/ext/sources/examples/wchess/libwchess/WChess.cpp +193 -0
- data/ext/sources/examples/wchess/libwchess/WChess.h +63 -0
- data/ext/sources/examples/wchess/libwchess/test-chessboard.cpp +117 -0
- data/ext/sources/examples/wchess/wchess.cmd/CMakeLists.txt +8 -0
- data/ext/sources/examples/wchess/wchess.cmd/wchess.cmd.cpp +249 -0
- data/ext/sources/examples/whisper.wasm/CMakeLists.txt +50 -0
- data/ext/sources/examples/whisper.wasm/emscripten.cpp +118 -0
- data/ext/sources/examples/whisper.wasm/index-tmpl.html +658 -0
- data/ext/sources/ggml/CMakeLists.txt +390 -0
- data/ext/sources/ggml/cmake/BuildTypes.cmake +54 -0
- data/ext/sources/ggml/cmake/GitVars.cmake +22 -0
- data/ext/sources/ggml/cmake/common.cmake +26 -0
- data/ext/sources/ggml/cmake/ggml-config.cmake.in +152 -0
- data/ext/sources/ggml/include/ggml-alloc.h +76 -0
- data/ext/sources/ggml/include/ggml-backend.h +354 -0
- data/ext/sources/ggml/include/ggml-blas.h +25 -0
- data/ext/sources/ggml/include/ggml-cann.h +123 -0
- data/ext/sources/ggml/include/ggml-cpp.h +39 -0
- data/ext/sources/ggml/include/ggml-cpu.h +143 -0
- data/ext/sources/ggml/include/ggml-cuda.h +47 -0
- data/ext/sources/ggml/include/ggml-kompute.h +50 -0
- data/ext/sources/ggml/include/ggml-metal.h +66 -0
- data/ext/sources/ggml/include/ggml-opencl.h +26 -0
- data/ext/sources/ggml/include/ggml-opt.h +237 -0
- data/ext/sources/ggml/include/ggml-rpc.h +33 -0
- data/ext/sources/ggml/include/ggml-sycl.h +49 -0
- data/ext/sources/ggml/include/ggml-vulkan.h +29 -0
- data/ext/{ggml.h → sources/ggml/include/ggml.h} +621 -821
- data/ext/sources/ggml/include/gguf.h +202 -0
- data/ext/sources/ggml/src/CMakeLists.txt +346 -0
- data/ext/sources/ggml/src/ggml-alloc.c +1042 -0
- data/ext/sources/ggml/src/ggml-amx/CMakeLists.txt +107 -0
- data/ext/sources/ggml/src/ggml-amx/common.h +94 -0
- data/ext/sources/ggml/src/ggml-amx/ggml-amx.cpp +446 -0
- data/ext/sources/ggml/src/ggml-amx/mmq.cpp +2510 -0
- data/ext/sources/ggml/src/ggml-amx/mmq.h +17 -0
- data/ext/sources/ggml/src/ggml-backend-impl.h +255 -0
- data/ext/sources/ggml/src/ggml-backend-reg.cpp +586 -0
- data/ext/sources/ggml/src/ggml-backend.cpp +2011 -0
- data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +87 -0
- data/ext/sources/ggml/src/ggml-blas/ggml-blas.cpp +517 -0
- data/ext/sources/ggml/src/ggml-cann/CMakeLists.txt +74 -0
- data/ext/sources/ggml/src/ggml-cann/Doxyfile +2579 -0
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.cpp +181 -0
- data/ext/sources/ggml/src/ggml-cann/acl_tensor.h +258 -0
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.cpp +3193 -0
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +1125 -0
- data/ext/sources/ggml/src/ggml-cann/common.h +420 -0
- data/ext/sources/ggml/src/ggml-cann/ggml-cann.cpp +2606 -0
- data/ext/sources/ggml/src/ggml-cann/kernels/CMakeLists.txt +30 -0
- data/ext/sources/ggml/src/ggml-cann/kernels/ascendc_kernels.h +19 -0
- data/ext/sources/ggml/src/ggml-cann/kernels/dup.cpp +234 -0
- data/ext/sources/ggml/src/ggml-cann/kernels/get_row_f16.cpp +197 -0
- data/ext/sources/ggml/src/ggml-cann/kernels/get_row_f32.cpp +190 -0
- data/ext/sources/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp +204 -0
- data/ext/sources/ggml/src/ggml-cann/kernels/get_row_q8_0.cpp +191 -0
- data/ext/sources/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +218 -0
- data/ext/sources/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +216 -0
- data/ext/sources/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +295 -0
- data/ext/sources/ggml/src/ggml-common.h +1857 -0
- data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +504 -0
- data/ext/sources/ggml/src/ggml-cpu/amx/amx.cpp +221 -0
- data/ext/sources/ggml/src/ggml-cpu/amx/amx.h +8 -0
- data/ext/sources/ggml/src/ggml-cpu/amx/common.h +91 -0
- data/ext/sources/ggml/src/ggml-cpu/amx/mmq.cpp +2511 -0
- data/ext/sources/ggml/src/ggml-cpu/amx/mmq.h +10 -0
- data/ext/sources/ggml/src/ggml-cpu/binary-ops.cpp +158 -0
- data/ext/sources/ggml/src/ggml-cpu/binary-ops.h +16 -0
- data/ext/sources/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +100 -0
- data/ext/sources/ggml/src/ggml-cpu/common.h +72 -0
- data/ext/sources/ggml/src/ggml-cpu/cpu-feats-x86.cpp +327 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +6431 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +8 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp +55 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-hbm.h +8 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-impl.h +508 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-quants.c +13747 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-quants.h +63 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-traits.cpp +36 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu-traits.h +38 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +3510 -0
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.cpp +671 -0
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +337 -0
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.h +95 -0
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +482 -0
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.h +17 -0
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +3544 -0
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.h +14 -0
- data/ext/sources/ggml/src/ggml-cpu/ops.cpp +8903 -0
- data/ext/sources/ggml/src/ggml-cpu/ops.h +110 -0
- data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +892 -0
- data/ext/sources/ggml/src/ggml-cpu/unary-ops.cpp +186 -0
- data/ext/sources/ggml/src/ggml-cpu/unary-ops.h +28 -0
- data/ext/sources/ggml/src/ggml-cpu/vec.cpp +252 -0
- data/ext/sources/ggml/src/ggml-cpu/vec.h +818 -0
- data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +184 -0
- data/ext/sources/ggml/src/ggml-cuda/acc.cu +61 -0
- data/ext/sources/ggml/src/ggml-cuda/acc.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/arange.cu +34 -0
- data/ext/sources/ggml/src/ggml-cuda/arange.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/argmax.cu +91 -0
- data/ext/sources/ggml/src/ggml-cuda/argmax.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/argsort.cu +104 -0
- data/ext/sources/ggml/src/ggml-cuda/argsort.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +363 -0
- data/ext/sources/ggml/src/ggml-cuda/binbcast.cuh +9 -0
- data/ext/sources/ggml/src/ggml-cuda/clamp.cu +45 -0
- data/ext/sources/ggml/src/ggml-cuda/clamp.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/common.cuh +828 -0
- data/ext/sources/ggml/src/ggml-cuda/concat.cu +221 -0
- data/ext/sources/ggml/src/ggml-cuda/concat.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/conv-transpose-1d.cu +89 -0
- data/ext/sources/ggml/src/ggml-cuda/conv-transpose-1d.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/convert.cu +730 -0
- data/ext/sources/ggml/src/ggml-cuda/convert.cuh +26 -0
- data/ext/sources/ggml/src/ggml-cuda/count-equal.cu +64 -0
- data/ext/sources/ggml/src/ggml-cuda/count-equal.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/cp-async.cuh +57 -0
- data/ext/sources/ggml/src/ggml-cuda/cpy.cu +705 -0
- data/ext/sources/ggml/src/ggml-cuda/cpy.cuh +11 -0
- data/ext/sources/ggml/src/ggml-cuda/cross-entropy-loss.cu +189 -0
- data/ext/sources/ggml/src/ggml-cuda/cross-entropy-loss.cuh +7 -0
- data/ext/sources/ggml/src/ggml-cuda/dequantize.cuh +103 -0
- data/ext/sources/ggml/src/ggml-cuda/diagmask.cu +40 -0
- data/ext/sources/ggml/src/ggml-cuda/diagmask.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +881 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +1471 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cu +357 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cu +365 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f16.cuh +482 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f32.cuh +472 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +634 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn.cu +346 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/getrows.cu +275 -0
- data/ext/sources/ggml/src/ggml-cuda/getrows.cuh +15 -0
- data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +3505 -0
- data/ext/sources/ggml/src/ggml-cuda/gla.cu +93 -0
- data/ext/sources/ggml/src/ggml-cuda/gla.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/im2col.cu +103 -0
- data/ext/sources/ggml/src/ggml-cuda/im2col.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/mma.cuh +396 -0
- data/ext/sources/ggml/src/ggml-cuda/mmq.cu +324 -0
- data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +3217 -0
- data/ext/sources/ggml/src/ggml-cuda/mmv.cu +336 -0
- data/ext/sources/ggml/src/ggml-cuda/mmv.cuh +12 -0
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +595 -0
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cuh +12 -0
- data/ext/sources/ggml/src/ggml-cuda/norm.cu +458 -0
- data/ext/sources/ggml/src/ggml-cuda/norm.cuh +11 -0
- data/ext/sources/ggml/src/ggml-cuda/opt-step-adamw.cu +78 -0
- data/ext/sources/ggml/src/ggml-cuda/opt-step-adamw.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/out-prod.cu +68 -0
- data/ext/sources/ggml/src/ggml-cuda/out-prod.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/pad.cu +49 -0
- data/ext/sources/ggml/src/ggml-cuda/pad.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/pool2d.cu +94 -0
- data/ext/sources/ggml/src/ggml-cuda/pool2d.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/quantize.cu +190 -0
- data/ext/sources/ggml/src/ggml-cuda/quantize.cuh +27 -0
- data/ext/sources/ggml/src/ggml-cuda/rope.cu +456 -0
- data/ext/sources/ggml/src/ggml-cuda/rope.cuh +7 -0
- data/ext/sources/ggml/src/ggml-cuda/scale.cu +31 -0
- data/ext/sources/ggml/src/ggml-cuda/scale.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/softmax.cu +283 -0
- data/ext/sources/ggml/src/ggml-cuda/softmax.cuh +7 -0
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +148 -0
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cu +153 -0
- data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/sum.cu +45 -0
- data/ext/sources/ggml/src/ggml-cuda/sum.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/sumrows.cu +39 -0
- data/ext/sources/ggml/src/ggml-cuda/sumrows.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_8.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_1.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_2.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_4.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_4.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_8.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_1.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_2.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_2.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_4.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_8.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_64-ncols2_1.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_1.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_2.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_4.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_8.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +78 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_s.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_s.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_s.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q2_k.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q3_k.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_k.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_k.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q6_k.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/tsembd.cu +47 -0
- data/ext/sources/ggml/src/ggml-cuda/tsembd.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/unary.cu +289 -0
- data/ext/sources/ggml/src/ggml-cuda/unary.cuh +59 -0
- data/ext/sources/ggml/src/ggml-cuda/upscale.cu +51 -0
- data/ext/sources/ggml/src/ggml-cuda/upscale.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/vecdotq.cuh +1135 -0
- data/ext/sources/ggml/src/ggml-cuda/vendors/cuda.h +15 -0
- data/ext/sources/ggml/src/ggml-cuda/vendors/hip.h +243 -0
- data/ext/sources/ggml/src/ggml-cuda/vendors/musa.h +140 -0
- data/ext/sources/ggml/src/ggml-cuda/wkv.cu +199 -0
- data/ext/sources/ggml/src/ggml-cuda/wkv.cuh +7 -0
- data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +131 -0
- data/ext/sources/ggml/src/ggml-impl.h +601 -0
- data/ext/sources/ggml/src/ggml-kompute/CMakeLists.txt +166 -0
- data/ext/sources/ggml/src/ggml-kompute/ggml-kompute.cpp +2251 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/common.comp +112 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_add.comp +58 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_addrow.comp +25 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f16.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f32.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f16.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f32.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_diagmask.comp +30 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_gelu.comp +22 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows.comp +17 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f16.comp +31 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f32.comp +31 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_0.comp +38 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_1.comp +39 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q6_k.comp +44 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp +69 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_mat_f32.comp +51 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_0.comp +33 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_1.comp +35 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp +140 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp +106 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q8_0.comp +73 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp +28 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_norm.comp +84 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_relu.comp +21 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rmsnorm.comp +53 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f16.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f32.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f16.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f32.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale.comp +19 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale_8.comp +23 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_silu.comp +22 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp +72 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp +71 -0
- data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +120 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +622 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.m +5998 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal.metal +7089 -0
- data/ext/sources/ggml/src/ggml-musa/CMakeLists.txt +113 -0
- data/ext/sources/ggml/src/ggml-musa/mudnn.cu +112 -0
- data/ext/sources/ggml/src/ggml-musa/mudnn.cuh +12 -0
- data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +96 -0
- data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +5124 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/add.cl +83 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/clamp.cl +20 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cpy.cl +184 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +118 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/diag_mask_inf.cl +58 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/embed_kernel.py +26 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gelu.cl +62 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle.cl +268 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_general.cl +274 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/get_rows.cl +163 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f16.cl +57 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f32.cl +57 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul.cl +79 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mat_Ab_Bi_8x4.cl +139 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f16.cl +118 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32.cl +118 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_1row.cl +94 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_l4.cl +84 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f32_f32.cl +118 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32.cl +192 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_16x_flat.cl +307 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_8x_flat.cl +265 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_8x_flat.cl +272 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_v.cl +254 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q6_k.cl +190 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/norm.cl +81 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/relu.cl +16 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/rms_norm.cl +96 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/rope.cl +721 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/scale.cl +16 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/silu.cl +30 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f16.cl +87 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f32.cl +87 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f16.cl +86 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f32.cl +86 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +84 -0
- data/ext/sources/ggml/src/ggml-opt.cpp +1037 -0
- data/ext/sources/ggml/src/ggml-quants.c +5232 -0
- data/ext/sources/ggml/src/ggml-quants.h +100 -0
- data/ext/sources/ggml/src/ggml-rpc/CMakeLists.txt +9 -0
- data/ext/sources/ggml/src/ggml-rpc/ggml-rpc.cpp +1813 -0
- data/ext/sources/ggml/src/ggml-sycl/CMakeLists.txt +189 -0
- data/ext/sources/ggml/src/ggml-sycl/backend.hpp +37 -0
- data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +345 -0
- data/ext/sources/ggml/src/ggml-sycl/binbcast.hpp +39 -0
- data/ext/sources/ggml/src/ggml-sycl/common.cpp +83 -0
- data/ext/sources/ggml/src/ggml-sycl/common.hpp +589 -0
- data/ext/sources/ggml/src/ggml-sycl/concat.cpp +195 -0
- data/ext/sources/ggml/src/ggml-sycl/concat.hpp +20 -0
- data/ext/sources/ggml/src/ggml-sycl/conv.cpp +101 -0
- data/ext/sources/ggml/src/ggml-sycl/conv.hpp +20 -0
- data/ext/sources/ggml/src/ggml-sycl/convert.cpp +623 -0
- data/ext/sources/ggml/src/ggml-sycl/convert.hpp +34 -0
- data/ext/sources/ggml/src/ggml-sycl/cpy.cpp +700 -0
- data/ext/sources/ggml/src/ggml-sycl/cpy.hpp +11 -0
- data/ext/sources/ggml/src/ggml-sycl/dequantize.hpp +791 -0
- data/ext/sources/ggml/src/ggml-sycl/dmmv.cpp +1162 -0
- data/ext/sources/ggml/src/ggml-sycl/dmmv.hpp +27 -0
- data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +2957 -0
- data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +1511 -0
- data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +75 -0
- data/ext/sources/ggml/src/ggml-sycl/gemm.hpp +99 -0
- data/ext/sources/ggml/src/ggml-sycl/getrows.cpp +309 -0
- data/ext/sources/ggml/src/ggml-sycl/getrows.hpp +20 -0
- data/ext/sources/ggml/src/ggml-sycl/ggml-sycl.cpp +4493 -0
- data/ext/sources/ggml/src/ggml-sycl/gla.cpp +106 -0
- data/ext/sources/ggml/src/ggml-sycl/gla.hpp +8 -0
- data/ext/sources/ggml/src/ggml-sycl/im2col.cpp +136 -0
- data/ext/sources/ggml/src/ggml-sycl/im2col.hpp +21 -0
- data/ext/sources/ggml/src/ggml-sycl/mmq.cpp +3030 -0
- data/ext/sources/ggml/src/ggml-sycl/mmq.hpp +33 -0
- data/ext/sources/ggml/src/ggml-sycl/mmvq.cpp +1110 -0
- data/ext/sources/ggml/src/ggml-sycl/mmvq.hpp +27 -0
- data/ext/sources/ggml/src/ggml-sycl/norm.cpp +501 -0
- data/ext/sources/ggml/src/ggml-sycl/norm.hpp +26 -0
- data/ext/sources/ggml/src/ggml-sycl/outprod.cpp +47 -0
- data/ext/sources/ggml/src/ggml-sycl/outprod.hpp +10 -0
- data/ext/sources/ggml/src/ggml-sycl/presets.hpp +74 -0
- data/ext/sources/ggml/src/ggml-sycl/quants.hpp +83 -0
- data/ext/sources/ggml/src/ggml-sycl/rope.cpp +361 -0
- data/ext/sources/ggml/src/ggml-sycl/rope.hpp +20 -0
- data/ext/sources/ggml/src/ggml-sycl/softmax.cpp +261 -0
- data/ext/sources/ggml/src/ggml-sycl/softmax.hpp +20 -0
- data/ext/sources/ggml/src/ggml-sycl/sycl_hw.cpp +13 -0
- data/ext/sources/ggml/src/ggml-sycl/sycl_hw.hpp +23 -0
- data/ext/sources/ggml/src/ggml-sycl/tsembd.cpp +72 -0
- data/ext/sources/ggml/src/ggml-sycl/tsembd.hpp +20 -0
- data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +1215 -0
- data/ext/sources/ggml/src/ggml-sycl/wkv.cpp +293 -0
- data/ext/sources/ggml/src/ggml-sycl/wkv.hpp +10 -0
- data/ext/sources/ggml/src/ggml-threading.cpp +12 -0
- data/ext/sources/ggml/src/ggml-threading.h +14 -0
- data/ext/sources/ggml/src/ggml-vulkan/CMakeLists.txt +196 -0
- data/ext/sources/ggml/src/ggml-vulkan/cmake/host-toolchain.cmake.in +15 -0
- data/ext/sources/ggml/src/ggml-vulkan/ggml-vulkan.cpp +10700 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +39 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +29 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add.comp +29 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argmax.comp +51 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp +69 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp +17 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp +41 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp +49 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp +105 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp +23 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +51 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +242 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp +17 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/count_equal.comp +31 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_f32.comp +20 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp +462 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +699 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_head.comp +13 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_m.comp +42 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_s.comp +35 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp +44 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xs.comp +43 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp +48 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp +39 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp +49 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp +32 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_xs.comp +34 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp +34 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp +42 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_0.comp +30 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_1.comp +32 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +68 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_0.comp +34 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_1.comp +35 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +70 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp +33 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q8_0.comp +31 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/diag_mask_inf.comp +34 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/div.comp +27 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +337 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.comp +162 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +360 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +267 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +59 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu.comp +25 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu_quick.comp +23 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp +64 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_head.comp +9 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp +76 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp +33 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +41 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp +66 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +100 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +41 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/leaky_relu.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp +27 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp +48 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +169 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp +118 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_m.comp +82 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp +79 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_s.comp +90 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xs.comp +87 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xxs.comp +87 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_s.comp +90 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_xxs.comp +88 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +118 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp +154 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +130 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +132 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +136 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +167 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +130 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +868 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +441 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +442 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.comp +99 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/norm.comp +44 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_adamw.comp +42 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +28 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pool2d.comp +74 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/quantize_q8_1.comp +77 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/relu.comp +21 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp +26 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat_back.comp +37 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +52 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_back.comp +55 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp +58 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +60 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +43 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +43 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_vision.comp +47 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +24 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sigmoid.comp +20 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu_back.comp +26 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp +17 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +173 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_back.comp +50 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/square.comp +17 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sub.comp +29 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp +37 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp +20 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_bfloat16_support.comp +7 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat2_support.comp +7 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp +7 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_integer_dot_support.comp +7 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp +41 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/types.comp +1373 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +36 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +751 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/wkv6.comp +87 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/wkv7.comp +91 -0
- data/ext/sources/ggml/src/ggml.c +6550 -0
- data/ext/sources/ggml/src/gguf.cpp +1330 -0
- data/ext/{whisper.h → sources/include/whisper.h} +91 -24
- data/ext/sources/src/CMakeLists.txt +143 -0
- data/ext/sources/src/coreml/whisper-decoder-impl.h +158 -0
- data/ext/sources/src/coreml/whisper-decoder-impl.m +226 -0
- data/ext/sources/src/coreml/whisper-encoder-impl.h +154 -0
- data/ext/sources/src/coreml/whisper-encoder-impl.m +222 -0
- data/ext/sources/src/coreml/whisper-encoder.h +26 -0
- data/ext/sources/src/coreml/whisper-encoder.mm +73 -0
- data/ext/sources/src/openvino/whisper-openvino-encoder.cpp +108 -0
- data/ext/sources/src/openvino/whisper-openvino-encoder.h +31 -0
- data/ext/sources/src/whisper-arch.h +197 -0
- data/ext/{whisper.cpp → sources/src/whisper.cpp} +2535 -835
- data/ext/sources/tests/CMakeLists.txt +105 -0
- data/ext/sources/tests/earnings21/eval.mk +58 -0
- data/ext/sources/tests/earnings21/eval.py +68 -0
- data/ext/sources/tests/earnings21/normalizers/__init__.py +2 -0
- data/ext/sources/tests/earnings21/normalizers/basic.py +80 -0
- data/ext/sources/tests/earnings21/normalizers/english.json +1741 -0
- data/ext/sources/tests/earnings21/normalizers/english.py +550 -0
- data/ext/sources/tests/earnings21/requirements.txt +6 -0
- data/ext/sources/tests/en-0-ref.txt +1 -0
- data/ext/sources/tests/en-1-ref.txt +1 -0
- data/ext/sources/tests/en-2-ref.txt +1 -0
- data/ext/sources/tests/es-0-ref.txt +1 -0
- data/ext/sources/tests/librispeech/eval.mk +39 -0
- data/ext/sources/tests/librispeech/eval.py +47 -0
- data/ext/sources/tests/librispeech/normalizers/__init__.py +2 -0
- data/ext/sources/tests/librispeech/normalizers/basic.py +80 -0
- data/ext/sources/tests/librispeech/normalizers/english.json +1741 -0
- data/ext/sources/tests/librispeech/normalizers/english.py +550 -0
- data/ext/sources/tests/librispeech/requirements.txt +6 -0
- data/ext/sources/tests/run-tests.sh +130 -0
- data/ext/sources/tests/test-c.c +3 -0
- data/ext/sources/tests/test-vad-full.cpp +54 -0
- data/ext/sources/tests/test-vad.cpp +83 -0
- data/ext/sources/tests/test-whisper.js +58 -0
- data/extsources.rb +34 -0
- data/lib/whisper/model/uri.rb +178 -0
- data/sig/whisper.rbs +480 -0
- data/tests/helper.rb +35 -0
- data/tests/jfk_reader/.gitignore +5 -0
- data/tests/jfk_reader/extconf.rb +3 -0
- data/tests/jfk_reader/jfk_reader.c +68 -0
- data/tests/test_callback.rb +202 -0
- data/tests/test_error.rb +20 -0
- data/tests/test_model.rb +109 -0
- data/tests/test_package.rb +46 -0
- data/tests/test_params.rb +297 -0
- data/tests/test_segment.rb +74 -0
- data/tests/test_vad.rb +19 -0
- data/tests/test_vad_params.rb +103 -0
- data/tests/test_whisper.rb +212 -124
- data/whispercpp.gemspec +37 -0
- metadata +794 -13
- data/ext/dr_wav.h +0 -6434
- data/ext/ggml.c +0 -21755
- data/ext/ruby_whisper.cpp +0 -426
data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp
@@ -0,0 +1,1215 @@
+//
+// MIT license
+// Copyright (C) 2025 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_VECDOTQ_HPP
+#define GGML_SYCL_VECDOTQ_HPP
+
+#include "dpct/helper.hpp"
+#include "ggml.h"
+#include "quants.hpp"
+
+typedef float (*vec_dot_q_sycl_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1,
+                                  const int & iqs);
+
+static __dpct_inline__ int get_int_from_int8(const int8_t* x8, const int& i32) {
+    const uint16_t* x16 =
+        (const uint16_t*)(x8 + sizeof(int) * i32); // assume at least 2 byte
+                                                   // alignment
+
+    int x32 = 0;
+    x32 |= x16[0] << 0;
+    x32 |= x16[1] << 16;
+
+    return x32;
+}
+
+static __dpct_inline__ int get_int_from_uint8(
+    const uint8_t* x8,
+    const int& i32) {
+    const uint16_t* x16 =
+        (const uint16_t*)(x8 + sizeof(int) * i32); // assume at least 2 byte
+                                                   // alignment
+
+    int x32 = 0;
+    x32 |= x16[0] << 0;
+    x32 |= x16[1] << 16;
+
+    return x32;
+}
+
+static __dpct_inline__ int get_int_from_int8_aligned(
+    const int8_t* x8,
+    const int& i32) {
+    return *(
+        (const int*)(x8 + sizeof(int) * i32)); // assume at least 4 byte alignment
+}
+
+static __dpct_inline__ int get_int_from_uint8_aligned(
+    const uint8_t* x8,
+    const int& i32) {
+    return *(
+        (const int*)(x8 + sizeof(int) * i32)); // assume at least 4 byte alignment
+}
+
+static __dpct_inline__ void get_int_from_table_16(const uint32_t &q4,
+                                                  const uint8_t *values,
+                                                  int &val1, int &val2) {
+
+    uint32_t aux32; const uint8_t * q8 = (const uint8_t *)&aux32;
+    aux32 = q4 & 0x0f0f0f0f;
+    uint16_t v1 = values[q8[0]] | (values[q8[1]] << 8);
+    uint16_t v2 = values[q8[2]] | (values[q8[3]] << 8);
+    val1 = v1 | (v2 << 16);
+    aux32 = (q4 >> 4) & 0x0f0f0f0f;
+    v1 = values[q8[0]] | (values[q8[1]] << 8);
+    v2 = values[q8[2]] | (values[q8[3]] << 8);
+    val2 = v1 | (v2 << 16);
+}
+
+#define VDR_Q2_K_Q8_1_MMVQ 1
+
+// contiguous v/x values
+static __dpct_inline__ float vec_dot_q2_K_q8_1_impl_mmvq(
+    const int &v, const int *__restrict__ u, const uint8_t *__restrict__ scales,
+    const sycl::half2 &dm2, const float *__restrict__ d8) {
+
+    float sumf_d = 0.0f;
+    float sumf_m = 0.0f;
+
+#pragma unroll
+    for (int i = 0; i < QR2_K; ++i) {
+        const int sc = scales[2*i];
+
+        const int vi = (v >> (2*i)) & 0x03030303;
+
+        sumf_d +=
+            d8[i] * (dpct::dp4a(vi, u[i], 0) * (sc & 0xF)); // SIMD dot product
+
+        // fill int with 4x m
+        int m = sc >> 4;
+        m |= m << 8;
+        m |= m << 16;
+        sumf_m += d8[i] *
+                  dpct::dp4a(
+                      m, u[i],
+                      0); // multiply constant q2_K part with sum of q8_1 values
+    }
+
+    const sycl::float2 dm2f =
+        dm2.convert<float, sycl::rounding_mode::automatic>();
+
+    return dm2f.x() * sumf_d - dm2f.y() * sumf_m;
+}
+
+
+#define VDR_Q3_K_Q8_1_MMVQ 1
+
+// contiguous v/x values
+static __dpct_inline__ float vec_dot_q3_K_q8_1_impl_mmvq(
+    const int &vl, const int &vh, const int *__restrict__ u,
+    const uint8_t *__restrict__ scales, const int &scale_offset,
+    const float &d3, const float *__restrict__ d8) {
+
+    float sumf = 0.0f;
+
+#pragma unroll
+    for (int i = 0; i < QR3_K; ++i) {
+        const int isc = scale_offset + 2*i;
+
+        const int isc_low = isc % (QK_K/32);
+        const int sc_shift_low = 4 * (isc / (QK_K/32));
+        const int sc_low = (scales[isc_low] >> sc_shift_low) & 0xF;
+
+        const int isc_high = isc % (QK_K/64);
+        const int sc_shift_high = 2 * (isc / (QK_K/64));
+        const int sc_high = ((scales[(QK_K/32) + isc_high] >> sc_shift_high) & 3) << 4;
+
+        const int sc = (sc_low | sc_high) - 32;
+
+        const int vil = (vl >> (2*i)) & 0x03030303;
+
+        const int vih = ((vh >> i) << 2) & 0x04040404;
+
+        const int vi =
+            dpct::vectorized_binary<sycl::char4>(vil, vih, dpct::sub_sat());
+
+        sumf += d8[i] * (dpct::dp4a(vi, u[i], 0) * sc); // SIMD dot product
+    }
+
+    return d3 * sumf;
+}
+
+#define VDR_Q4_K_Q8_1_MMVQ 2
+
+// contiguous v/x values
+static __dpct_inline__ float vec_dot_q4_K_q8_1_impl_vmmq(
+    const int *__restrict__ v, const int *__restrict__ u,
+    const uint8_t *__restrict__ sc, const uint8_t *__restrict__ m,
+    const sycl::half2 &dm4, const float *__restrict__ d8) {
+
+    float sumf_d = 0.0f;
+    float sumf_m = 0.0f;
+
+#pragma unroll
+    for (int i = 0; i < QR4_K; ++i) {
+        const int v0i = (v[0] >> (4*i)) & 0x0F0F0F0F;
+        const int v1i = (v[1] >> (4*i)) & 0x0F0F0F0F;
+
+        const int dot1 =
+            dpct::dp4a(v1i, u[2 * i + 1],
+                       dpct::dp4a(v0i, u[2 * i + 0], 0)); // SIMD dot product
+        const int dot2 =
+            dpct::dp4a(0x01010101, u[2 * i + 1],
+                       dpct::dp4a(0x01010101, u[2 * i + 0], 0)); // sum of u
+
+        sumf_d += d8[i] * (dot1 * sc[i]);
+        sumf_m += d8[i] * (dot2 * m[i]); // multiply constant part of q4_K with sum of q8_1 values
+    }
+
+    const sycl::float2 dm4f =
+        dm4.convert<float, sycl::rounding_mode::automatic>();
|
180
|
+
|
181
|
+
return dm4f.x() * sumf_d - dm4f.y() * sumf_m;
|
182
|
+
}
|
183
|
+
|
184
|
+
|
185
|
+
#define VDR_Q5_K_Q8_1_MMVQ 2
|
186
|
+
|
187
|
+
// contiguous v/x values
|
188
|
+
static __dpct_inline__ float vec_dot_q5_K_q8_1_impl_vmmq(
|
189
|
+
const int *__restrict__ vl, const int *__restrict__ vh,
|
190
|
+
const int *__restrict__ u, const uint8_t *__restrict__ sc,
|
191
|
+
const uint8_t *__restrict__ m, const sycl::half2 &dm5,
|
192
|
+
const float *__restrict__ d8) {
|
193
|
+
|
194
|
+
float sumf_d = 0.0f;
|
195
|
+
float sumf_m = 0.0f;
|
196
|
+
|
197
|
+
#pragma unroll
|
198
|
+
for (int i = 0; i < QR5_K; ++i) {
|
199
|
+
const int vl0i = (vl[0] >> (4*i)) & 0x0F0F0F0F;
|
200
|
+
const int vl1i = (vl[1] >> (4*i)) & 0x0F0F0F0F;
|
201
|
+
|
202
|
+
const int vh0i = ((vh[0] >> i) << 4) & 0x10101010;
|
203
|
+
const int vh1i = ((vh[1] >> i) << 4) & 0x10101010;
|
204
|
+
|
205
|
+
const int v0i = vl0i | vh0i;
|
206
|
+
const int v1i = vl1i | vh1i;
|
207
|
+
|
208
|
+
const int dot1 =
|
209
|
+
dpct::dp4a(v0i, u[2 * i + 0],
|
210
|
+
dpct::dp4a(v1i, u[2 * i + 1], 0)); // SIMD dot product
|
211
|
+
const int dot2 =
|
212
|
+
dpct::dp4a(0x01010101, u[2 * i + 0],
|
213
|
+
dpct::dp4a(0x01010101, u[2 * i + 1], 0)); // sum of u
|
214
|
+
|
215
|
+
sumf_d += d8[i] * (dot1 * sc[i]);
|
216
|
+
sumf_m += d8[i] * (dot2 * m[i]);
|
217
|
+
|
218
|
+
}
|
219
|
+
|
220
|
+
const sycl::float2 dm5f =
|
221
|
+
dm5.convert<float, sycl::rounding_mode::automatic>();
|
222
|
+
|
223
|
+
return dm5f.x() * sumf_d - dm5f.y() * sumf_m;
|
224
|
+
}
|
225
|
+
|
226
|
+
|
227
|
+
#define VDR_Q6_K_Q8_1_MMVQ 1
|
228
|
+
|
229
|
+
// contiguous v/x values
|
230
|
+
static __dpct_inline__ float
|
231
|
+
vec_dot_q6_K_q8_1_impl_mmvq(const int &vl, const int &vh,
|
232
|
+
const int *__restrict__ u,
|
233
|
+
const int8_t *__restrict__ scales, const float &d,
|
234
|
+
const float *__restrict__ d8) {
|
235
|
+
|
236
|
+
float sumf = 0.0f;
|
237
|
+
|
238
|
+
#pragma unroll
|
239
|
+
for (int i = 0; i < QR6_K; ++i) {
|
240
|
+
const int sc = scales[4*i];
|
241
|
+
|
242
|
+
const int vil = (vl >> (4*i)) & 0x0F0F0F0F;
|
243
|
+
|
244
|
+
const int vih = ((vh >> (4*i)) << 4) & 0x30303030;
|
245
|
+
|
246
|
+
const int vi = dpct::vectorized_binary<sycl::char4>(
|
247
|
+
(vil | vih), 0x20202020, dpct::sub_sat()); // vi = (vil | vih) - 32
|
248
|
+
|
249
|
+
sumf += d8[i] * (dpct::dp4a(vi, u[i], 0) * sc); // SIMD dot product
|
250
|
+
}
|
251
|
+
|
252
|
+
return d*sumf;
|
253
|
+
}
|
254
|
+
|
255
|
+
// VDR = vec dot ratio, how many contiguous integers each thread processes when the vec dot kernel is called
|
256
|
+
// MMVQ = mul_mat_vec_q, MMQ = mul_mat_q
|
257
|
+
|
258
|
+
template <ggml_type T> struct reorder_vec_dot_q_sycl {
|
259
|
+
static_assert(T != T, "ggml_type for reorder vecdot not implemented");
|
260
|
+
};
|
261
|
+
|
262
|
+
template <> struct reorder_vec_dot_q_sycl<GGML_TYPE_Q4_0> {
|
263
|
+
static constexpr ggml_type gtype = GGML_TYPE_Q4_0;
|
264
|
+
|
265
|
+
using q4_0_block = ggml_sycl_reordered::block_q_t<GGML_TYPE_Q4_0>;
|
266
|
+
using q4_0_traits = typename q4_0_block::traits;
|
267
|
+
|
268
|
+
__dpct_inline__ float vec_dot_q4_0_q8_1_impl(const int * v, const int * u, const float & d4, const sycl::half2 & ds8) {
|
269
|
+
int sumi = 0;
|
270
|
+
|
271
|
+
#pragma unroll
|
272
|
+
for (size_t i = 0; i < q4_0_traits::vdr_mmvq; ++i) {
|
273
|
+
const int vi0 = (v[i] >> 0) & 0x0F0F0F0F;
|
274
|
+
const int vi1 = (v[i] >> 4) & 0x0F0F0F0F;
|
275
|
+
|
276
|
+
// SIMD dot product of quantized values
|
277
|
+
sumi = dpct::dp4a(vi0, u[2 * i + 0], sumi);
|
278
|
+
sumi = dpct::dp4a(vi1, u[2 * i + 1], sumi);
|
279
|
+
}
|
280
|
+
|
281
|
+
const sycl::float2 ds8f = ds8.convert<float, sycl::rounding_mode::automatic>();
|
282
|
+
|
283
|
+
// second part effectively subtracts 8 from each quant value
|
284
|
+
return d4 * (sumi * ds8f.x() - (8 * q4_0_traits::vdr_mmvq / q4_0_traits::qi) * ds8f.y());
|
285
|
+
}
|
286
|
+
|
287
|
+
__dpct_inline__ float operator()(const void * __restrict__ vbq, const int ibx_offset, const int d_offset,
|
288
|
+
const block_q8_1 * __restrict__ bq8_1, const int & iqs, int /* nblocks */) {
|
289
|
+
const uint8_t * bq4_0 = static_cast<const uint8_t *>(vbq) + ibx_offset;
|
290
|
+
const ggml_half d = *(reinterpret_cast<const ggml_half *>(static_cast<const uint8_t *>(vbq) + d_offset));
|
291
|
+
int v[q4_0_traits::vdr_mmvq];
|
292
|
+
int u[2 * q4_0_traits::vdr_mmvq];
|
293
|
+
|
294
|
+
#pragma unroll
|
295
|
+
|
296
|
+
for (size_t i = 0; i < q4_0_traits::vdr_mmvq; ++i) {
|
297
|
+
v[i] = get_int_from_uint8(bq4_0, iqs + i);
|
298
|
+
u[2 * i + 0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
|
299
|
+
u[2 * i + 1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + q4_0_traits::qi);
|
300
|
+
}
|
301
|
+
|
302
|
+
return vec_dot_q4_0_q8_1_impl(v, u, d, bq8_1->ds);
|
303
|
+
};
|
304
|
+
};
|
305
|
+
|
306
|
+
static inline float vec_dot_q4_K_q8_1_common(const int * __restrict__ q4, const uint16_t * __restrict__ scales,
|
307
|
+
const ggml_half2 & dm, const block_q8_1 * __restrict__ bq8_1,
|
308
|
+
const int & iqs) {
|
309
|
+
int v[2];
|
310
|
+
int u[2 * QR4_K];
|
311
|
+
float d8[QR4_K];
|
312
|
+
|
313
|
+
v[0] = q4[0];
|
314
|
+
v[1] = q4[4];
|
315
|
+
|
316
|
+
uint16_t aux[2];
|
317
|
+
const int j = (QR4_K * ((iqs / 2) / (QI8_1 / 2))) / 2;
|
318
|
+
if (j < 2) {
|
319
|
+
aux[0] = scales[j + 0] & 0x3f3f;
|
320
|
+
aux[1] = scales[j + 2] & 0x3f3f;
|
321
|
+
} else {
|
322
|
+
aux[0] = ((scales[j + 2] >> 0) & 0x0f0f) | ((scales[j - 2] & 0xc0c0) >> 2);
|
323
|
+
aux[1] = ((scales[j + 2] >> 4) & 0x0f0f) | ((scales[j - 0] & 0xc0c0) >> 2);
|
324
|
+
}
|
325
|
+
|
326
|
+
const uint8_t * sc = (const uint8_t *) aux;
|
327
|
+
const uint8_t * m = sc + 2;
|
328
|
+
|
329
|
+
const int bq8_offset = QR4_K * ((iqs / 2) / (QI8_1 / 2));
|
330
|
+
|
331
|
+
for (int i = 0; i < QR4_K; ++i) {
|
332
|
+
const block_q8_1 * bq8i = bq8_1 + bq8_offset + i;
|
333
|
+
d8[i] = bq8i->ds[0];
|
334
|
+
|
335
|
+
const int * q8 = (const int *) bq8i->qs + ((iqs / 2) % 4);
|
336
|
+
u[2 * i + 0] = q8[0];
|
337
|
+
u[2 * i + 1] = q8[4];
|
338
|
+
}
|
339
|
+
|
340
|
+
return vec_dot_q4_K_q8_1_impl_vmmq(v, u, sc, m, dm, d8);
|
341
|
+
}
|
342
|
+
|
343
|
+
template <> struct reorder_vec_dot_q_sycl<GGML_TYPE_Q4_K> {
|
344
|
+
static constexpr ggml_type gtype = GGML_TYPE_Q4_K;
|
345
|
+
|
346
|
+
using q4_k_block = ggml_sycl_reordered::block_q_t<GGML_TYPE_Q4_K>;
|
347
|
+
using q4_k_traits = typename q4_k_block::traits;
|
348
|
+
|
349
|
+
float operator()(const void * __restrict__ vbq, const int ibx_offset, const int d_offset,
|
350
|
+
const block_q8_1 * __restrict__ bq8_1, const int & iqs, int nblocks) {
|
351
|
+
const int ib = ibx_offset / (QK_K / 2);
|
352
|
+
|
353
|
+
const uint8_t * base = static_cast<const uint8_t *>(vbq);
|
354
|
+
const uint8_t * qs = base + ibx_offset;
|
355
|
+
const int total_qs_bytes = nblocks * (QK_K / 2);
|
356
|
+
const uint8_t * scs = base + total_qs_bytes + ib * K_SCALE_SIZE;
|
357
|
+
const ggml_half2 * dms = reinterpret_cast<const ggml_half2 *>(base + d_offset);
|
358
|
+
|
359
|
+
const int bq8_offset = QR4_K * ((iqs / 2) / (QI8_1 / 2));
|
360
|
+
const int * q4 = (const int *) (qs + 16 * bq8_offset + 4 * ((iqs / 2) % 4));
|
361
|
+
const uint16_t * scales = (const uint16_t *) scs;
|
362
|
+
|
363
|
+
return vec_dot_q4_K_q8_1_common(q4, scales, *dms, bq8_1, iqs);
|
364
|
+
}
|
365
|
+
};
|
366
|
+
|
367
|
+
#define VDR_Q4_0_Q8_1_MMVQ 2
|
368
|
+
#define VDR_Q4_0_Q8_1_MMQ 4
|
369
|
+
|
370
|
+
template <int vdr>
|
371
|
+
static __dpct_inline__ float vec_dot_q4_0_q8_1_impl(const int * v, const int * u, const float & d4,
|
372
|
+
const sycl::half2 & ds8) {
|
373
|
+
int sumi = 0;
|
374
|
+
#pragma unroll
|
375
|
+
for (int i = 0; i < vdr; ++i) {
|
376
|
+
const int vi0 = (v[i] >> 0) & 0x0F0F0F0F;
|
377
|
+
const int vi1 = (v[i] >> 4) & 0x0F0F0F0F;
|
378
|
+
|
379
|
+
// SIMD dot product of quantized values
|
380
|
+
sumi = dpct::dp4a(vi0, u[2 * i + 0], sumi);
|
381
|
+
sumi = dpct::dp4a(vi1, u[2 * i + 1], sumi);
|
382
|
+
}
|
383
|
+
|
384
|
+
const sycl::float2 ds8f = ds8.convert<float, sycl::rounding_mode::automatic>();
|
385
|
+
|
386
|
+
// second part effectively subtracts 8 from each quant value
|
387
|
+
return d4 * (sumi * ds8f.x() - (8 * vdr / QI4_0) * ds8f.y());
|
388
|
+
}
|
389
|
+
|
390
|
+
#define VDR_Q4_1_Q8_1_MMVQ 2
|
391
|
+
#define VDR_Q4_1_Q8_1_MMQ 4
|
392
|
+
|
393
|
+
template <int vdr>
|
394
|
+
static __dpct_inline__ float vec_dot_q4_1_q8_1_impl(const int *v, const int *u,
|
395
|
+
const sycl::half2 &dm4,
|
396
|
+
const sycl::half2 &ds8) {
|
397
|
+
|
398
|
+
int sumi = 0;
|
399
|
+
|
400
|
+
#pragma unroll
|
401
|
+
for (int i = 0; i < vdr; ++i) {
|
402
|
+
const int vi0 = (v[i] >> 0) & 0x0F0F0F0F;
|
403
|
+
const int vi1 = (v[i] >> 4) & 0x0F0F0F0F;
|
404
|
+
|
405
|
+
// SIMD dot product of quantized values
|
406
|
+
sumi = dpct::dp4a(vi0, u[2 * i + 0], sumi);
|
407
|
+
sumi = dpct::dp4a(vi1, u[2 * i + 1], sumi);
|
408
|
+
}
|
409
|
+
|
410
|
+
#ifdef GGML_SYCL_F16
|
411
|
+
const sycl::float2 tmp =
|
412
|
+
(dm4 * ds8).convert<float, sycl::rounding_mode::automatic>();
|
413
|
+
const float d4d8 = tmp.x();
|
414
|
+
const float m4s8 = tmp.y();
|
415
|
+
#else
|
416
|
+
const sycl::float2 dm4f =
|
417
|
+
dm4.convert<float, sycl::rounding_mode::automatic>();
|
418
|
+
const sycl::float2 ds8f =
|
419
|
+
ds8.convert<float, sycl::rounding_mode::automatic>();
|
420
|
+
const float d4d8 = dm4f.x() * ds8f.x();
|
421
|
+
const float m4s8 = dm4f.y() * ds8f.y();
|
422
|
+
#endif // GGML_SYCL_F16
|
423
|
+
|
424
|
+
// scale second part of sum by QI8_1/(vdr * QR4_1) to compensate for multiple threads adding it
|
425
|
+
return sumi * d4d8 + m4s8 / (QI8_1 / (vdr * QR4_1));
|
426
|
+
}
|
427
|
+
|
428
|
+
#define VDR_Q5_0_Q8_1_MMVQ 2
|
429
|
+
#define VDR_Q5_0_Q8_1_MMQ 4
|
430
|
+
|
431
|
+
template <int vdr>
|
432
|
+
static __dpct_inline__ float
|
433
|
+
vec_dot_q5_0_q8_1_impl(const int *vl, const int *vh, const int *u,
|
434
|
+
const float &d5, const sycl::half2 &ds8) {
|
435
|
+
int sumi = 0;
|
436
|
+
|
437
|
+
#pragma unroll
|
438
|
+
for (int i = 0; i < vdr; ++i) {
|
439
|
+
int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits
|
440
|
+
vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4
|
441
|
+
vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12
|
442
|
+
vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20
|
443
|
+
vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28
|
444
|
+
sumi = dpct::dp4a(vi0, u[2 * i + 0],
|
445
|
+
sumi); // SIMD dot product of quantized values
|
446
|
+
|
447
|
+
int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits
|
448
|
+
vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4
|
449
|
+
vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12
|
450
|
+
vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20
|
451
|
+
vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28
|
452
|
+
sumi = dpct::dp4a(vi1, u[2 * i + 1],
|
453
|
+
sumi); // SIMD dot product of quantized values
|
454
|
+
}
|
455
|
+
|
456
|
+
const sycl::float2 ds8f =
|
457
|
+
ds8.convert<float, sycl::rounding_mode::automatic>();
|
458
|
+
|
459
|
+
// second part effectively subtracts 16 from each quant value
|
460
|
+
return d5 * (sumi * ds8f.x() - (16 * vdr / QI5_0) * ds8f.y());
|
461
|
+
}
|
462
|
+
|
463
|
+
#define VDR_Q5_1_Q8_1_MMVQ 2
|
464
|
+
#define VDR_Q5_1_Q8_1_MMQ 4
|
465
|
+
|
466
|
+
template <int vdr>
|
467
|
+
static __dpct_inline__ float
|
468
|
+
vec_dot_q5_1_q8_1_impl(const int *vl, const int *vh, const int *u,
|
469
|
+
const sycl::half2 &dm5, const sycl::half2 &ds8) {
|
470
|
+
|
471
|
+
int sumi = 0;
|
472
|
+
|
473
|
+
#pragma unroll
|
474
|
+
for (int i = 0; i < vdr; ++i) {
|
475
|
+
int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits
|
476
|
+
vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4
|
477
|
+
vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12
|
478
|
+
vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20
|
479
|
+
vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28
|
480
|
+
sumi = dpct::dp4a(vi0, u[2 * i + 0],
|
481
|
+
sumi); // SIMD dot product of quantized values
|
482
|
+
|
483
|
+
int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits
|
484
|
+
vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4
|
485
|
+
vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12
|
486
|
+
vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20
|
487
|
+
vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28
|
488
|
+
sumi = dpct::dp4a(vi1, u[2 * i + 1],
|
489
|
+
sumi); // SIMD dot product of quantized values
|
490
|
+
}
|
491
|
+
|
492
|
+
#ifdef GGML_SYCL_F16
|
493
|
+
const sycl::float2 tmp =
|
494
|
+
(dm5 * ds8).convert<float, sycl::rounding_mode::automatic>();
|
495
|
+
const float d5d8 = tmp.x();
|
496
|
+
const float m5s8 = tmp.y();
|
497
|
+
|
498
|
+
|
499
|
+
#else
|
500
|
+
const sycl::float2 dm5f =
|
501
|
+
dm5.convert<float, sycl::rounding_mode::automatic>();
|
502
|
+
const sycl::float2 ds8f =
|
503
|
+
ds8.convert<float, sycl::rounding_mode::automatic>();
|
504
|
+
const float d5d8 = dm5f.x() * ds8f.x();
|
505
|
+
const float m5s8 = dm5f.y() * ds8f.y();
|
506
|
+
#endif // GGML_SYCL_F16
|
507
|
+
|
508
|
+
// scale second part of sum by QI5_1 / vdr to compensate for multiple threads adding it
|
509
|
+
return sumi*d5d8 + m5s8 / (QI5_1 / vdr);
|
510
|
+
}
|
511
|
+
|
512
|
+
#define VDR_Q8_0_Q8_1_MMVQ 2
|
513
|
+
#define VDR_Q8_0_Q8_1_MMQ 8
|
514
|
+
|
515
|
+
template <int vdr>
|
516
|
+
static __dpct_inline__ float vec_dot_q8_0_q8_1_impl(const int *v, const int *u,
|
517
|
+
const float &d8_0,
|
518
|
+
const float &d8_1) {
|
519
|
+
|
520
|
+
int sumi = 0;
|
521
|
+
|
522
|
+
#pragma unroll
|
523
|
+
for (int i = 0; i < vdr; ++i) {
|
524
|
+
// SIMD dot product of quantized values
|
525
|
+
sumi = dpct::dp4a(v[i], u[i], sumi);
|
526
|
+
}
|
527
|
+
|
528
|
+
return d8_0*d8_1 * sumi;
|
529
|
+
}
|
530
|
+
|
531
|
+
template <int vdr>
|
532
|
+
static __dpct_inline__ float vec_dot_q8_1_q8_1_impl(const int *v, const int *u,
|
533
|
+
const sycl::half2 &dm8,
|
534
|
+
const sycl::half2 &ds8) {
|
535
|
+
|
536
|
+
int sumi = 0;
|
537
|
+
|
538
|
+
#pragma unroll
|
539
|
+
for (int i = 0; i < vdr; ++i) {
|
540
|
+
// SIMD dot product of quantized values
|
541
|
+
sumi = dpct::dp4a(v[i], u[i], sumi);
|
542
|
+
}
|
543
|
+
|
544
|
+
#ifdef GGML_SYCL_F16
|
545
|
+
const sycl::float2 tmp =
|
546
|
+
(dm8 * ds8).convert<float, sycl::rounding_mode::automatic>();
|
547
|
+
const float d8d8 = tmp.x();
|
548
|
+
const float m8s8 = tmp.y();
|
549
|
+
#else
|
550
|
+
const sycl::float2 dm8f =
|
551
|
+
dm8.convert<float, sycl::rounding_mode::automatic>();
|
552
|
+
const sycl::float2 ds8f =
|
553
|
+
ds8.convert<float, sycl::rounding_mode::automatic>();
|
554
|
+
const float d8d8 = dm8f.x() * ds8f.x();
|
555
|
+
const float m8s8 = dm8f.y() * ds8f.y();
|
556
|
+
#endif // GGML_SYCL_F16
|
557
|
+
|
558
|
+
// scale second part of sum by QI8_1/ vdr to compensate for multiple threads adding it
|
559
|
+
return sumi*d8d8 + m8s8 / (QI8_1 / vdr);
|
560
|
+
}
|
561
|
+
|
562
|
+
static __dpct_inline__ float
|
563
|
+
vec_dot_q4_0_q8_1(const void *__restrict__ vbq,
|
564
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
|
565
|
+
|
566
|
+
const block_q4_0 * bq4_0 = (const block_q4_0 *) vbq;
|
567
|
+
|
568
|
+
int v[VDR_Q4_0_Q8_1_MMVQ];
|
569
|
+
int u[2 * VDR_Q4_0_Q8_1_MMVQ];
|
570
|
+
|
571
|
+
#pragma unroll
|
572
|
+
for (int i = 0; i < VDR_Q4_0_Q8_1_MMVQ; ++i) {
|
573
|
+
v[i] = get_int_from_uint8(bq4_0->qs, iqs + i);
|
574
|
+
u[2 * i + 0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
|
575
|
+
u[2 * i + 1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_0);
|
576
|
+
}
|
577
|
+
|
578
|
+
return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMVQ>(v, u, bq4_0->d, bq8_1->ds);
|
579
|
+
}
|
580
|
+
|
581
|
+
static __dpct_inline__ float
|
582
|
+
vec_dot_q4_1_q8_1(const void *__restrict__ vbq,
|
583
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
|
584
|
+
|
585
|
+
const block_q4_1 * bq4_1 = (const block_q4_1 *) vbq;
|
586
|
+
|
587
|
+
int v[VDR_Q4_1_Q8_1_MMVQ];
|
588
|
+
int u[2*VDR_Q4_1_Q8_1_MMVQ];
|
589
|
+
|
590
|
+
#pragma unroll
|
591
|
+
for (int i = 0; i < VDR_Q4_1_Q8_1_MMVQ; ++i) {
|
592
|
+
v[i] = get_int_from_uint8_aligned(bq4_1->qs, iqs + i);
|
593
|
+
u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
|
594
|
+
u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_1);
|
595
|
+
}
|
596
|
+
|
597
|
+
return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMVQ>(v, u, bq4_1->dm, bq8_1->ds);
|
598
|
+
}
|
599
|
+
|
600
|
+
static __dpct_inline__ float
|
601
|
+
vec_dot_q5_0_q8_1(const void *__restrict__ vbq,
|
602
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
|
603
|
+
|
604
|
+
const block_q5_0 * bq5_0 = (const block_q5_0 *) vbq;
|
605
|
+
|
606
|
+
int vl[VDR_Q5_0_Q8_1_MMVQ];
|
607
|
+
int vh[VDR_Q5_0_Q8_1_MMVQ];
|
608
|
+
int u[2*VDR_Q5_0_Q8_1_MMVQ];
|
609
|
+
|
610
|
+
#pragma unroll
|
611
|
+
for (int i = 0; i < VDR_Q5_0_Q8_1_MMVQ; ++i) {
|
612
|
+
vl[i] = get_int_from_uint8(bq5_0->qs, iqs + i);
|
613
|
+
vh[i] = get_int_from_uint8(bq5_0->qh, 0) >> (4 * (iqs + i));
|
614
|
+
u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
|
615
|
+
u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_0);
|
616
|
+
}
|
617
|
+
|
618
|
+
return vec_dot_q5_0_q8_1_impl<VDR_Q5_0_Q8_1_MMVQ>(vl, vh, u, bq5_0->d, bq8_1->ds);
|
619
|
+
}
|
620
|
+
|
621
|
+
static __dpct_inline__ float
|
622
|
+
vec_dot_q5_1_q8_1(const void *__restrict__ vbq,
|
623
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
|
624
|
+
|
625
|
+
const block_q5_1 * bq5_1 = (const block_q5_1 *) vbq;
|
626
|
+
|
627
|
+
int vl[VDR_Q5_1_Q8_1_MMVQ];
|
628
|
+
int vh[VDR_Q5_1_Q8_1_MMVQ];
|
629
|
+
int u[2*VDR_Q5_1_Q8_1_MMVQ];
|
630
|
+
|
631
|
+
#pragma unroll
|
632
|
+
for (int i = 0; i < VDR_Q5_1_Q8_1_MMVQ; ++i) {
|
633
|
+
vl[i] = get_int_from_uint8_aligned(bq5_1->qs, iqs + i);
|
634
|
+
vh[i] = get_int_from_uint8_aligned(bq5_1->qh, 0) >> (4 * (iqs + i));
|
635
|
+
u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
|
636
|
+
u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_1);
|
637
|
+
}
|
638
|
+
|
639
|
+
return vec_dot_q5_1_q8_1_impl<VDR_Q5_1_Q8_1_MMVQ>(vl, vh, u, bq5_1->dm, bq8_1->ds);
|
640
|
+
}
|
641
|
+
|
642
|
+
static __dpct_inline__ float
|
643
|
+
vec_dot_q8_0_q8_1(const void *__restrict__ vbq,
|
644
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
|
645
|
+
|
646
|
+
const block_q8_0 * bq8_0 = (const block_q8_0 *) vbq;
|
647
|
+
|
648
|
+
int v[VDR_Q8_0_Q8_1_MMVQ];
|
649
|
+
int u[VDR_Q8_0_Q8_1_MMVQ];
|
650
|
+
|
651
|
+
#pragma unroll
|
652
|
+
for (int i = 0; i < VDR_Q8_0_Q8_1_MMVQ; ++i) {
|
653
|
+
v[i] = get_int_from_int8(bq8_0->qs, iqs + i);
|
654
|
+
u[i] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
|
655
|
+
}
|
656
|
+
|
657
|
+
return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMVQ>(v, u, bq8_0->d,
|
658
|
+
bq8_1->ds[0]);
|
659
|
+
}
|
660
|
+
|
661
|
+
static __dpct_inline__ float
|
662
|
+
vec_dot_q2_K_q8_1(const void *__restrict__ vbq,
|
663
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
|
664
|
+
|
665
|
+
const block_q2_K * bq2_K = (const block_q2_K *) vbq;
|
666
|
+
|
667
|
+
const int bq8_offset = QR2_K * (iqs / QI8_1);
|
668
|
+
const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2);
|
669
|
+
|
670
|
+
const uint8_t * scales = bq2_K->scales + scale_offset;
|
671
|
+
|
672
|
+
const int v = get_int_from_uint8_aligned(bq2_K->qs, iqs);
|
673
|
+
int u[QR2_K];
|
674
|
+
float d8[QR2_K];
|
675
|
+
|
676
|
+
#pragma unroll
|
677
|
+
for (int i = 0; i < QR2_K; ++ i) {
|
678
|
+
u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1);
|
679
|
+
d8[i] = bq8_1[bq8_offset + i].ds[0];
|
680
|
+
}
|
681
|
+
|
682
|
+
return vec_dot_q2_K_q8_1_impl_mmvq(v, u, scales, bq2_K->dm, d8);
|
683
|
+
}
|
684
|
+
|
685
|
+
static __dpct_inline__ float
|
686
|
+
vec_dot_q3_K_q8_1(const void *__restrict__ vbq,
|
687
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
|
688
|
+
|
689
|
+
const block_q3_K * bq3_K = (const block_q3_K *) vbq;
|
690
|
+
|
691
|
+
const int bq8_offset = QR3_K * (iqs / (QI3_K/2));
|
692
|
+
const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2);
|
693
|
+
|
694
|
+
const float d = bq3_K->d;
|
695
|
+
|
696
|
+
const int vl = get_int_from_uint8(bq3_K->qs, iqs);
|
697
|
+
|
698
|
+
// invert the mask with ~ so that a 0/1 results in 4/0 being subtracted
|
699
|
+
const int vh = ~get_int_from_uint8(bq3_K->hmask, iqs % (QI3_K/2)) >> bq8_offset;
|
700
|
+
|
701
|
+
int u[QR3_K];
|
702
|
+
float d8[QR3_K];
|
703
|
+
|
704
|
+
#pragma unroll
|
705
|
+
for (int i = 0; i < QR3_K; ++i) {
|
706
|
+
u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1);
|
707
|
+
d8[i] = bq8_1[bq8_offset + i].ds[0];
|
708
|
+
}
|
709
|
+
|
710
|
+
return vec_dot_q3_K_q8_1_impl_mmvq(vl, vh, u, bq3_K->scales, scale_offset, d, d8);
|
711
|
+
}
|
712
|
+
|
713
|
+
static __dpct_inline__ float vec_dot_q4_K_q8_1(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1,
|
714
|
+
const int & iqs) {
|
715
|
+
#ifndef GGML_QKK_64
|
716
|
+
|
717
|
+
const block_q4_K * bq4_K = (const block_q4_K *) vbq;
|
718
|
+
|
719
|
+
const int bq8_offset = QR4_K * ((iqs / 2) / (QI8_1 / 2));
|
720
|
+
const int * q4 = (const int *) (bq4_K->qs + 16 * bq8_offset + 4 * ((iqs / 2) % 4));
|
721
|
+
const uint16_t * scales = (const uint16_t *) bq4_K->scales;
|
722
|
+
|
723
|
+
return vec_dot_q4_K_q8_1_common(q4, scales, bq4_K->dm, bq8_1, iqs);
|
724
|
+
|
725
|
+
#else
|
726
|
+
|
727
|
+
#if __SYCL_ARCH__ >= VER_4VEC // lowest compute capability for integer intrinsics
|
728
|
+
const block_q4_K * bq4_K = (const block_q4_K *) vbq;
|
729
|
+
|
730
|
+
float sumf_d = 0.0f;
|
731
|
+
float sumf_m = 0.0f;
|
732
|
+
|
733
|
+
uint16_t aux16[2];
|
734
|
+
const uint8_t * s = (const uint8_t *)aux16;
|
735
|
+
|
736
|
+
const uint16_t * a = (const uint16_t *)bq4_K->scales;
|
737
|
+
aux16[0] = a[0] & 0x0f0f;
|
738
|
+
aux16[1] = (a[0] >> 4) & 0x0f0f;
|
739
|
+
|
740
|
+
const float dall = bq4_K->dm[0];
|
741
|
+
const float dmin = bq4_K->dm[1];
|
742
|
+
|
743
|
+
const float d8_1 = bq8_1[0].ds[0];
|
744
|
+
const float d8_2 = bq8_1[1].ds[1];
|
745
|
+
|
746
|
+
const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2));
|
747
|
+
const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4);
|
748
|
+
const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2));
|
749
|
+
const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4);
|
750
|
+
|
751
|
+
const int * q4 = (const int *)bq4_K->qs + (iqs/2);
|
752
|
+
const int v1 = q4[0];
|
753
|
+
const int v2 = q4[4];
|
754
|
+
|
755
|
+
const int dot1 = dpct::dp4a(ui2, v2 & 0x0f0f0f0f, dpct::dp4a(ui1, v1 & 0x0f0f0f0f, 0));
|
756
|
+
const int dot2 = dpct::dp4a(ui4, (v2 >> 4) & 0x0f0f0f0f, dpct::dp4a(ui3, (v1 >> 4) & 0x0f0f0f0f, 0));
|
757
|
+
const int dot3 = dpct::dp4a(0x01010101, ui2, dpct::dp4a(0x01010101, ui1, 0));
|
758
|
+
const int dot4 = dpct::dp4a(0x01010101, ui4, dpct::dp4a(0x01010101, ui3, 0));
|
759
|
+
|
760
|
+
sumf_d += d8_1 * (dot1 * s[0]) + d8_2 * (dot2 * s[1]);
|
761
|
+
sumf_m += d8_1 * (dot3 * s[2]) + d8_2 * (dot4 * s[3]);
|
762
|
+
|
763
|
+
return dall * sumf_d - dmin * sumf_m;
|
764
|
+
|
765
|
+
#else
|
766
|
+
bad_arch();
|
767
|
+
#endif // __SYCL_ARCH__ >= VER_4VEC
|
768
|
+
|
769
|
+
#endif
|
770
|
+
}
|
771
|
+
|
772
|
+
static __dpct_inline__ float
|
773
|
+
vec_dot_q5_K_q8_1(const void *__restrict__ vbq,
|
774
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
|
775
|
+
|
776
|
+
#ifndef GGML_QKK_64
|
777
|
+
const block_q5_K * bq5_K = (const block_q5_K *) vbq;
|
778
|
+
|
779
|
+
int vl[2];
|
780
|
+
int vh[2];
|
781
|
+
int u[2*QR5_K];
|
782
|
+
float d8[QR5_K];
|
783
|
+
|
784
|
+
const int bq8_offset = QR5_K * ((iqs/2) / (QI8_1/2));
|
785
|
+
const int * ql = (const int *)(bq5_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4));
|
786
|
+
const int * qh = (const int *)(bq5_K->qh + 4 * ((iqs/2)%4));
|
787
|
+
|
788
|
+
vl[0] = ql[0];
|
789
|
+
vl[1] = ql[4];
|
790
|
+
|
791
|
+
vh[0] = qh[0] >> bq8_offset;
|
792
|
+
vh[1] = qh[4] >> bq8_offset;
|
793
|
+
|
794
|
+
const uint16_t * scales = (const uint16_t *)bq5_K->scales;
|
795
|
+
uint16_t aux[2];
|
796
|
+
const int j = bq8_offset/2;
|
797
|
+
if (j < 2) {
|
798
|
+
aux[0] = scales[j+0] & 0x3f3f;
|
799
|
+
aux[1] = scales[j+2] & 0x3f3f;
|
800
|
+
} else {
|
801
|
+
aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2);
|
802
|
+
aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2);
|
803
|
+
}
|
804
|
+
const uint8_t * sc = (const uint8_t *)aux;
|
805
|
+
const uint8_t * m = sc + 2;
|
806
|
+
|
807
|
+
#pragma unroll
|
808
|
+
for (int i = 0; i < QR5_K; ++i) {
|
809
|
+
const block_q8_1 * bq8i = bq8_1 + bq8_offset + i;
|
810
|
+
d8[i] = bq8i->ds[0];
|
811
|
+
|
812
|
+
const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4);
|
813
|
+
u[2*i+0] = q8[0];
|
814
|
+
u[2*i+1] = q8[4];
|
815
|
+
}
|
816
|
+
|
817
|
+
return vec_dot_q5_K_q8_1_impl_vmmq(vl, vh, u, sc, m, bq5_K->dm, d8);
|
818
|
+
|
819
|
+
#else
|
820
|
+
|
821
|
+
#if __SYCL_ARCH__ >= VER_4VEC // lowest compute capability for integer intrinsics
|
822
|
+
const block_q5_K * bq5_K = (const block_q5_K *) vbq;
|
823
|
+
|
824
|
+
const int8_t * s = bq5_K->scales;
|
825
|
+
|
826
|
+
const float d = bq5_K->d;
|
827
|
+
|
828
|
+
const float d8_1 = bq8_1[0].ds[0];
|
829
|
+
const float d8_2 = bq8_1[1].ds[1];
|
830
|
+
|
831
|
+
const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2));
|
832
|
+
const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4);
|
833
|
+
const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2));
|
834
|
+
const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4);
|
835
|
+
|
836
|
+
const int * ql = (const int *)bq5_K->qs + (iqs/2);
|
837
|
+
const int vl1 = ql[0];
|
838
|
+
const int vl2 = ql[4];
|
839
|
+
|
840
|
+
const int step = 4 * (iqs/2); // 0, 4, 8, 12
|
841
|
+
const int im = step/8; // = 0 for iqs = 0, 2, = 1 for iqs = 4, 6
|
842
|
+
const int in = step%8; // 0, 4, 0, 4
|
843
|
+
const int vh = (*((const int *)(bq5_K->qh + in))) >> im;
|
844
|
+
|
845
|
+
const int v1 = (((vh << 4) & 0x10101010) ^ 0x10101010) | ((vl1 >> 0) & 0x0f0f0f0f);
|
846
|
+
const int v2 = (((vh << 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 0) & 0x0f0f0f0f);
|
847
|
+
const int v3 = (((vh >> 0) & 0x10101010) ^ 0x10101010) | ((vl1 >> 4) & 0x0f0f0f0f);
|
848
|
+
const int v4 = (((vh >> 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 4) & 0x0f0f0f0f);
|
849
|
+
|
850
|
+
const float sumf_d = d8_1 * (dpct::dp4a(ui1, v1, 0) * s[0] + dpct::dp4a(ui2, v2, 0) * s[1])
|
851
|
+
+ d8_2 * (dpct::dp4a(ui3, v3, 0) * s[2] + dpct::dp4a(ui4, v4, 0) * s[3]);
|
852
|
+
|
853
|
+
return d * sumf_d;
|
854
|
+
|
855
|
+
#else
|
856
|
+
bad_arch();
|
857
|
+
#endif // __SYCL_ARCH__ >= VER_4VEC
|
858
|
+
|
859
|
+
#endif
|
860
|
+
}
|
861
|
+
|
862
|
+
static __dpct_inline__ float
|
863
|
+
vec_dot_q6_K_q8_1(const void *__restrict__ vbq,
|
864
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
|
865
|
+
|
866
|
+
const block_q6_K * bq6_K = (const block_q6_K *) vbq;
|
867
|
+
|
868
|
+
const int bq8_offset = 2 * QR6_K * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/4);
|
869
|
+
const int scale_offset = (QI6_K/4) * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/8);
|
870
|
+
const int vh_shift = 2 * ((iqs % (QI6_K/2)) / (QI6_K/4));
|
871
|
+
|
872
|
+
const int vl = get_int_from_uint8(bq6_K->ql, iqs);
|
873
|
+
const int vh = get_int_from_uint8(bq6_K->qh, (QI6_K/4) * (iqs / (QI6_K/2)) + iqs % (QI6_K/4)) >> vh_shift;
|
874
|
+
|
875
|
+
const int8_t * scales = bq6_K->scales + scale_offset;
|
876
|
+
|
877
|
+
int u[QR6_K];
|
878
|
+
float d8[QR6_K];
|
879
|
+
|
880
|
+
#pragma unroll
|
881
|
+
for (int i = 0; i < QR6_K; ++i) {
|
882
|
+
u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + 2*i].qs, iqs % QI8_1);
|
883
|
+
d8[i] = bq8_1[bq8_offset + 2 * i].ds[0];
|
884
|
+
}
|
885
|
+
|
886
|
+
return vec_dot_q6_K_q8_1_impl_mmvq(vl, vh, u, scales, bq6_K->d, d8);
|
887
|
+
}
|
888
|
+
|
889
|
+
|
890
|
+
static __dpct_inline__ float
|
891
|
+
vec_dot_iq2_xxs_q8_1(const void *__restrict__ vbq,
|
892
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs,
|
893
|
+
const uint64_t *iq2xxs_grid, const uint8_t *ksigns_iq2xs,
|
894
|
+
const uint8_t *kmask_iq2xs) {
|
895
|
+
#if QK_K == 256
|
896
|
+
const block_iq2_xxs * bq2 = (const block_iq2_xxs *) vbq;
|
897
|
+
|
898
|
+
const int ib32 = iqs;
|
899
|
+
const uint16_t * q2 = bq2->qs + 4*ib32;
|
900
|
+
const uint8_t * aux8 = (const uint8_t *)q2;
|
901
|
+
const int8_t * q8 = bq8_1[ib32].qs;
|
902
|
+
uint32_t aux32 = q2[2] | (q2[3] << 16);
|
903
|
+
int sumi = 0;
|
904
|
+
for (int l = 0; l < 4; ++l) {
|
905
|
+
const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
|
906
|
+
const uint8_t signs = ksigns_iq2xs[aux32 & 127];
|
907
|
+
for (int j = 0; j < 8; ++j) {
|
908
|
+
sumi += q8[j] * grid[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
|
909
|
+
}
|
910
|
+
q8 += 8;
|
911
|
+
aux32 >>= 7;
|
912
|
+
}
|
913
|
+
const float d = (float)bq2->d * (0.5f + aux32) * bq8_1[ib32].ds[0] * 0.25f;
|
914
|
+
return d * sumi;
|
915
|
+
#else
|
916
|
+
assert(false);
|
917
|
+
return 0.f;
|
918
|
+
#endif
|
919
|
+
}
|
920
|
+
|
921
|
+
static __dpct_inline__ float
|
922
|
+
vec_dot_iq2_xs_q8_1(const void *__restrict__ vbq,
|
923
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs,
|
924
|
+
const uint64_t *iq2xs_grid, const uint64_t *ksigns64) {
|
925
|
+
#if DPCT_COMPATIBILITY_TEMP >= \
|
926
|
+
MIN_CC_DP4A // lowest compute capability for integer intrinsics
|
927
|
+
#if QK_K == 256
|
928
|
+
const block_iq2_xs * bq2 = (const block_iq2_xs *) vbq;
|
929
|
+
|
930
|
+
const int ib32 = iqs;
|
931
|
+
const uint16_t * q2 = bq2->qs + 4*ib32;
|
932
|
+
const int8_t * q8 = bq8_1[ib32].qs;
|
933
|
+
const uint8_t ls1 = bq2->scales[ib32] & 0xf;
|
934
|
+
const uint8_t ls2 = bq2->scales[ib32] >> 4;
|
935
|
+
int sumi1 = 0;
|
936
|
+
for (int l = 0; l < 2; ++l) {
|
937
|
+
const uint32_t * grid = (const uint32_t *)(iq2xs_grid + (q2[l] & 511));
|
938
|
+
const uint32_t * signs = (const uint32_t *)(ksigns64 + (q2[l] >> 9));
|
939
|
+
const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
|
940
|
+
grid[0] ^ signs[0], signs[0], std::minus<>());
|
941
|
+
const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
|
942
|
+
grid[1] ^ signs[1], signs[1], std::minus<>());
|
943
|
+
sumi1 = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi1);
|
944
|
+
sumi1 = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi1);
|
945
|
+
q8 += 8;
|
946
|
+
}
|
947
|
+
int sumi2 = 0;
|
948
|
+
for (int l = 2; l < 4; ++l) {
|
949
|
+
const uint32_t * grid = (const uint32_t *)(iq2xs_grid + (q2[l] & 511));
|
950
|
+
const uint32_t * signs = (const uint32_t *)(ksigns64 + (q2[l] >> 9));
|
951
|
+
const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
|
952
|
+
grid[0] ^ signs[0], signs[0], std::minus<>());
|
953
|
+
const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
|
954
|
+
grid[1] ^ signs[1], signs[1], std::minus<>());
|
955
|
+
sumi2 = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi2);
|
956
|
+
sumi2 = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi2);
|
957
|
+
q8 += 8;
|
958
|
+
}
|
959
|
+
const float d = (float)bq2->d * bq8_1[ib32].ds[0] * 0.25f;
|
960
|
+
return d * ((0.5f + ls1) * sumi1 + (0.5f + ls2) * sumi2);
|
961
|
+
#else
|
962
|
+
assert(false);
|
963
|
+
return 0.f;
|
964
|
+
#endif
|
965
|
+
#else
|
966
|
+
assert(false);
|
967
|
+
return 0.f;
|
968
|
+
#endif
|
969
|
+
}
|
970
|
+
|
971
|
+
static __dpct_inline__ float
|
972
|
+
vec_dot_iq2_s_q8_1(const void *__restrict__ vbq,
|
973
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
|
974
|
+
#if QK_K == 256
|
975
|
+
const block_iq2_s * bq2 = (const block_iq2_s *) vbq;
|
976
|
+
|
977
|
+
const int ib32 = iqs;
|
978
|
+
const int8_t * q8 = bq8_1[ib32].qs;
|
979
|
+
const uint8_t * signs = bq2->qs + QK_K/8 + 4*ib32;
|
980
|
+
const uint8_t ls1 = bq2->scales[ib32] & 0xf;
|
981
|
+
const uint8_t ls2 = bq2->scales[ib32] >> 4;
|
982
|
+
int sumi1 = 0;
|
983
|
+
for (int l = 0; l < 2; ++l) {
|
984
|
+
const uint32_t * grid = (const uint32_t *)(iq2s_grid + (bq2->qs[4*ib32+l] | ((bq2->qh[ib32] << (8-2*l)) & 0x300)));
|
985
|
+
const uint32_t signs0 = dpct::vectorized_binary<sycl::uchar4>(
|
986
|
+
((signs[l] & 0xf) * 0x01010101) & 0x08040201, 0x08040201,
|
987
|
+
std::equal_to<>());
|
988
|
+
const uint32_t signs1 = dpct::vectorized_binary<sycl::uchar4>(
|
989
|
+
((signs[l] >> 4) * 0x01010101) & 0x08040201, 0x08040201,
|
990
|
+
std::equal_to<>());
|
991
|
+
const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
|
992
|
+
grid[0] ^ signs0, signs0, std::minus<>());
|
993
|
+
const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
|
994
|
+
grid[1] ^ signs1, signs1, std::minus<>());
|
995
|
+
sumi1 = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi1);
|
996
|
+
sumi1 = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi1);
|
997
|
+
q8 += 8;
|
998
|
+
}
|
999
|
+
int sumi2 = 0;
|
1000
|
+
for (int l = 2; l < 4; ++l) {
|
1001
|
+
const uint32_t * grid = (const uint32_t *)(iq2s_grid + (bq2->qs[4*ib32+l] | ((bq2->qh[ib32] << (8-2*l)) & 0x300)));
|
1002
|
+
const uint32_t signs0 = dpct::vectorized_binary<sycl::uchar4>(
|
1003
|
+
((signs[l] & 0xf) * 0x01010101) & 0x08040201, 0x08040201,
|
1004
|
+
std::equal_to<>());
|
1005
|
+
const uint32_t signs1 = dpct::vectorized_binary<sycl::uchar4>(
|
1006
|
+
((signs[l] >> 4) * 0x01010101) & 0x08040201, 0x08040201,
|
1007
|
+
std::equal_to<>());
|
1008
|
+
const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
|
1009
|
+
grid[0] ^ signs0, signs0, std::minus<>());
|
1010
|
+
const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
|
1011
|
+
grid[1] ^ signs1, signs1, std::minus<>());
|
1012
|
+
sumi2 = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi2);
|
1013
|
+
sumi2 = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi2);
|
1014
|
+
q8 += 8;
|
1015
|
+
}
|
1016
|
+
const float d = (float)bq2->d * bq8_1[ib32].ds[0] * 0.25f;
|
1017
|
+
return d * ((0.5f + ls1) * sumi1 + (0.5f + ls2) * sumi2);
|
1018
|
+
#else
|
1019
|
+
assert(false);
|
1020
|
+
#endif
|
1021
|
+
}
|
1022
|
+
|
1023
|
+
static __dpct_inline__ float
|
1024
|
+
vec_dot_iq3_xxs_q8_1(const void *__restrict__ vbq,
|
1025
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs,
|
1026
|
+
const uint32_t *iq3xxs_grid, const uint64_t *ksigns64) {
|
1027
|
+
#if DPCT_COMPATIBILITY_TEMP >= \
|
1028
|
+
MIN_CC_DP4A // lowest compute capability for integer intrinsics
|
1029
|
+
#if QK_K == 256
|
1030
|
+
const block_iq3_xxs * bq2 = (const block_iq3_xxs *) vbq;
|
1031
|
+
|
1032
|
+
const int ib32 = iqs;
|
1033
|
+
const uint8_t * q3 = bq2->qs + 8*ib32;
|
1034
|
+
const uint16_t * gas = (const uint16_t *)(bq2->qs + QK_K/4) + 2*ib32;
|
1035
|
+
const int8_t * q8 = bq8_1[ib32].qs;
|
1036
|
+
uint32_t aux32 = gas[0] | (gas[1] << 16);
|
1037
|
+
int sumi = 0;
|
1038
|
+
for (int l = 0; l < 4; ++l) {
|
1039
|
+
const uint32_t * grid1 = iq3xxs_grid + q3[2*l+0];
|
1040
|
+
const uint32_t * grid2 = iq3xxs_grid + q3[2*l+1];
|
1041
|
+
const uint32_t * signs = (const uint32_t *)(ksigns64 + (aux32 & 127));
|
1042
|
+
const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
|
1043
|
+
grid1[0] ^ signs[0], signs[0], std::minus<>());
|
1044
|
+
const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
|
1045
|
+
grid2[0] ^ signs[1], signs[1], std::minus<>());
|
1046
|
+
sumi = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi);
|
1047
|
+
sumi = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi);
|
1048
|
+
q8 += 8;
|
1049
|
+
aux32 >>= 7;
|
1050
|
+
}
|
1051
|
+
const float d = (float)bq2->d * (0.5f + aux32) * bq8_1[ib32].ds[0] * 0.5f;
|
1052
|
+
return d * sumi;
|
1053
|
+
#else
|
1054
|
+
assert(false);
|
1055
|
+
return 0.f;
|
1056
|
+
#endif
|
1057
|
+
#else
|
1058
|
+
assert(false);
|
1059
|
+
return 0.f;
|
1060
|
+
#endif
|
1061
|
+
}
|
1062
|
+
|
1063
|
+
static __dpct_inline__ float
|
1064
|
+
vec_dot_iq3_s_q8_1(const void *__restrict__ vbq,
|
1065
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs,
|
1066
|
+
const uint32_t *iq3s_grid) {
|
1067
|
+
#if QK_K == 256
|
1068
|
+
const block_iq3_s * bq2 = (const block_iq3_s *) vbq;
|
1069
|
+
|
1070
|
+
const int ib32 = iqs;
|
1071
|
+
const uint8_t * qs = bq2->qs + 8*ib32;
|
1072
|
+
const int8_t * q8 = bq8_1[ib32].qs;
|
1073
|
+
int sumi = 0;
|
1074
|
+
for (int l = 0; l < 4; ++l) {
|
1075
|
+
const uint32_t * grid1 = iq3s_grid + (qs[2*l+0] | ((bq2->qh[ib32] << (8 - 2*l)) & 256));
|
1076
|
+
const uint32_t * grid2 = iq3s_grid + (qs[2*l+1] | ((bq2->qh[ib32] << (7 - 2*l)) & 256));
|
1077
|
+
uint32_t signs0 = dpct::vectorized_binary<sycl::uchar4>(
|
1078
|
+
((bq2->signs[4 * ib32 + l] & 0xf) * 0x01010101) & 0x08040201,
|
1079
|
+
0x08040201, std::equal_to<>());
|
1080
|
+
uint32_t signs1 = dpct::vectorized_binary<sycl::uchar4>(
|
1081
|
+
((bq2->signs[4 * ib32 + l] >> 4) * 0x01010101) & 0x08040201,
|
1082
|
+
0x08040201, std::equal_to<>());
|
1083
|
+
const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
|
1084
|
+
grid1[0] ^ signs0, signs0, std::minus<>());
|
1085
|
+
const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
|
1086
|
+
grid2[0] ^ signs1, signs1, std::minus<>());
|
1087
|
+
sumi = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi);
|
1088
|
+
sumi = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi);
|
1089
|
+
q8 += 8;
|
1090
|
+
}
|
1091
|
+
const float d =
|
1092
|
+
(float)bq2->d *
|
1093
|
+
(1 + 2 * ((bq2->scales[ib32 / 2] >> 4 * (ib32 % 2)) & 0xf)) *
|
1094
|
+
bq8_1[ib32].ds[0];
|
1095
|
+
return d * sumi;
|
1096
|
+
#else
|
1097
|
+
assert(false);
|
1098
|
+
#endif
|
1099
|
+
}
|
1100
|
+
|
1101
|
+
static __dpct_inline__ float
|
1102
|
+
vec_dot_iq1_s_q8_1(const void *__restrict__ vbq,
|
1103
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs,
|
1104
|
+
const uint32_t *iq1s_grid_gpu) {
|
1105
|
+
#if QK_K == 256
|
1106
|
+
const block_iq1_s * bq1 = (const block_iq1_s *) vbq;
|
1107
|
+
|
1108
|
+
const int ib32 = iqs;
|
1109
|
+
int sumi = 0;
|
1110
|
+
const int * q8 = (const int *)bq8_1[ib32].qs;
|
1111
|
+
for (int l = 0; l < 4; ++l) {
|
1112
|
+
const int * grid = (const int *)(iq1s_grid_gpu + (bq1->qs[4*ib32+l] | (((bq1->qh[ib32] >> 3*l) & 7) << 8)));
|
1113
|
+
int grid0 = grid[0] & 0x0f0f0f0f;
|
1114
|
+
int grid1 = (grid[0] >> 4) & 0x0f0f0f0f;
|
1115
|
+
sumi = dpct::dp4a(q8[2 * l + 1], grid1,
|
1116
|
+
dpct::dp4a(q8[2 * l + 0], grid0, sumi));
|
1117
|
+
}
|
1118
|
+
|
1119
|
+
const float delta = bq1->qh[ib32] & 0x8000 ? -1-IQ1S_DELTA : -1+IQ1S_DELTA;
|
1120
|
+
const float d1q = (float)bq1->d * (2*((bq1->qh[ib32] >> 12) & 7) + 1);
|
1121
|
+
const float d = d1q * bq8_1[ib32].ds[0];
|
1122
|
+
const float m = d1q * bq8_1[ib32].ds[1];
|
1123
|
+
return d * sumi + m * delta;
|
1124
|
+
#else
|
1125
|
+
assert(false);
|
1126
|
+
#endif
|
1127
|
+
}
|
1128
|
+
|
1129
|
+
static __dpct_inline__ float
|
1130
|
+
vec_dot_iq1_m_q8_1(const void *__restrict__ vbq,
|
1131
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
|
1132
|
+
#if QK_K == 256
|
1133
|
+
const block_iq1_m * bq1 = (const block_iq1_m *) vbq;
|
1134
|
+
|
1135
|
+
const int ib32 = iqs;
|
1136
|
+
int sumi[2] = {0, 0};
|
1137
|
+
float sumf[2] = {0.f, 0.f};
|
1138
|
+
|
1139
|
+
const int * q8 = (const int *)bq8_1[ib32].qs;
|
1140
|
+
for (int l = 0; l < 4; ++l) {
|
1141
|
+
const int * grid = (const int *)(iq1s_grid_gpu + (bq1->qs[4*ib32+l] | (((bq1->qh[2*ib32+l/2] >> 4*(l%2)) & 7) << 8)));
|
1142
|
+
int grid0 = grid[0] & 0x0f0f0f0f;
|
1143
|
+
int grid1 = (grid[0] >> 4) & 0x0f0f0f0f;
|
1144
|
+
sumi[l / 2] = dpct::dp4a(q8[2 * l + 1], grid1,
|
1145
|
+
dpct::dp4a(q8[2 * l + 0], grid0, sumi[l / 2]));
|
1146
|
+
const float delta = (bq1->qh[2*ib32+l/2] >> 4*(l%2)) & 0x08 ? -1-IQ1M_DELTA : -1+IQ1M_DELTA;
|
1147
|
+
const int sumy = dpct::dp4a(q8[2 * l + 1], 0x01010101,
|
1148
|
+
dpct::dp4a(q8[2 * l + 0], 0x01010101, 0));
|
1149
|
+
sumf[l/2] += delta*sumy;
|
1150
|
+
}
|
1151
|
+
|
1152
|
+
iq1m_scale_t scale;
|
1153
|
+
const uint16_t * sc = (const uint16_t *)bq1->scales;
|
1154
|
+
scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
|
1155
|
+
const float d = (float)scale.f16 * bq8_1[ib32].ds[0];
|
1156
|
+
return d * ((sumi[0] + sumf[0]) * (2*((sc[ib32/2] >> 6*(ib32%2)) & 0x7) + 1) + (sumi[1] + sumf[1]) * (2*((sc[ib32/2] >> (6*(ib32%2)+3)) & 0x7) + 1));
|
1157
|
+
#else
|
1158
|
+
assert(false);
|
1159
|
+
#endif
|
1160
|
+
}
|
1161
|
+
|
1162
|
+
|
1163
|
+
static __dpct_inline__ float
|
1164
|
+
vec_dot_iq4_nl_q8_1(const void *__restrict__ vbq,
|
1165
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
|
1166
|
+
|
1167
|
+
const block_iq4_nl * bq = (const block_iq4_nl *) vbq;
|
1168
|
+
|
1169
|
+
const uint16_t * q4 = (const uint16_t *)bq->qs + 2*iqs;
|
1170
|
+
const int32_t * q8 = (const int32_t *)bq8_1->qs + iqs;
|
1171
|
+
|
1172
|
+
const uint8_t * values = (const uint8_t *)kvalues_iq4nl;
|
1173
|
+
|
1174
|
+
int v1, v2;
|
1175
|
+
int sumi1 = 0, sumi2 = 0;
|
1176
|
+
for (int l = 0; l < VDR_Q4_0_Q8_1_MMVQ; ++l) {
|
1177
|
+
const uint32_t aux = q4[2*l] | (q4[2*l+1] << 16);
|
1178
|
+
get_int_from_table_16(aux, values, v1, v2);
|
1179
|
+
sumi1 = dpct::dp4a(v1, q8[l + 0], sumi1);
|
1180
|
+
sumi2 = dpct::dp4a(v2, q8[l + 4], sumi2);
|
1181
|
+
}
|
1182
|
+
|
1183
|
+
const float d = (float)bq->d * bq8_1->ds[0];
|
1184
|
+
return d * (sumi1 + sumi2);
|
1185
|
+
}
|
1186
|
+
|
1187
|
+
|
1188
|
+
static __dpct_inline__ float
|
1189
|
+
vec_dot_iq4_xs_q8_1(const void *__restrict__ vbq,
|
1190
|
+
const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
|
1191
|
+
|
1192
|
+
#if QK_K == 256
|
1193
|
+
const block_iq4_xs * bq4 = (const block_iq4_xs *) vbq;
|
1194
|
+
const uint8_t * values = (const uint8_t *)kvalues_iq4nl;
|
1195
|
+
|
1196
|
+
// iqs is 0...7
|
1197
|
+
const int ib32 = iqs;
|
1198
|
+
const int32_t * q8 = (const int *)bq8_1[ib32].qs;
|
1199
|
+
const uint32_t * q4 = (const uint32_t *)bq4->qs + 4*ib32;
|
1200
|
+
const int8_t ls = ((bq4->scales_l[ib32/2] >> 4*(ib32%2)) & 0xf) | (((bq4->scales_h >> 2*ib32) & 3) << 4);
|
1201
|
+
const float d = (float)bq4->d * (ls - 32) * bq8_1[ib32].ds[0];
|
1202
|
+
int v1, v2;
|
1203
|
+
int sumi1 = 0, sumi2 = 0;
|
1204
|
+
for (int j = 0; j < 4; ++j) {
|
1205
|
+
get_int_from_table_16(q4[j], values, v1, v2);
|
1206
|
+
sumi1 = dpct::dp4a(v1, q8[j + 0], sumi1);
|
1207
|
+
sumi2 = dpct::dp4a(v2, q8[j + 4], sumi2);
|
1208
|
+
}
|
1209
|
+
return d * (sumi1 + sumi2);
|
1210
|
+
#else
|
1211
|
+
assert(false);
|
1212
|
+
#endif
|
1213
|
+
}
|
1214
|
+
|
1215
|
+
#endif // GGML_SYCL_VECDOTQ_HPP
|
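
Almost every kernel in the hunk above funnels its integer arithmetic through `dpct::dp4a`, a 4-way dot product over the four int8 lanes packed into each 32-bit operand, accumulated into an integer. A minimal standalone sketch of that single operation (a plain C++ reference model; the helper name `dp4a_ref` is hypothetical and not part of this package):

```cpp
// Reference model of a 4-way int8 dot product with accumulate, i.e. the
// operation the SYCL kernels above express as dpct::dp4a(a, b, acc).
// Hypothetical helper for illustration only; not shipped with whispercpp.
#include <cstdint>
#include <cstdio>

static int32_t dp4a_ref(int32_t a, int32_t b, int32_t acc) {
    for (int k = 0; k < 4; ++k) {
        // Extract lane k of each operand as a signed byte, then multiply-accumulate.
        const int8_t ai = static_cast<int8_t>(a >> (8 * k));
        const int8_t bi = static_cast<int8_t>(b >> (8 * k));
        acc += static_cast<int32_t>(ai) * static_cast<int32_t>(bi);
    }
    return acc;
}

int main() {
    // Bytes {1, 2, 3, 4} and {5, 6, 7, 8} packed little-endian into two ints:
    // 1*5 + 2*6 + 3*7 + 4*8 = 70.
    std::printf("%d\n", dp4a_ref(0x04030201, 0x08070605, 0));
    return 0;
}
```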