whispercpp 1.3.1 → 1.3.3
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +7 -3
- data/README.md +161 -43
- data/Rakefile +45 -13
- data/ext/.gitignore +4 -8
- data/ext/dependencies.rb +73 -0
- data/ext/extconf.rb +21 -198
- data/ext/options.rb +85 -0
- data/ext/ruby_whisper.c +177 -0
- data/ext/ruby_whisper.h +17 -2
- data/ext/ruby_whisper_context.c +672 -0
- data/ext/ruby_whisper_error.c +52 -0
- data/ext/ruby_whisper_model.c +232 -0
- data/ext/ruby_whisper_params.c +1303 -0
- data/ext/ruby_whisper_segment.c +220 -0
- data/ext/ruby_whisper_transcribe.cpp +93 -0
- data/ext/ruby_whisper_vad_params.c +288 -0
- data/ext/sources/CMakeGraphVizOptions.cmake +8 -0
- data/ext/sources/CMakeLists.txt +255 -0
- data/ext/sources/bindings/javascript/CMakeLists.txt +41 -0
- data/ext/sources/bindings/javascript/emscripten.cpp +93 -0
- data/ext/sources/bindings/javascript/libwhisper.worker.js +1 -0
- data/ext/sources/bindings/javascript/package-tmpl.json +26 -0
- data/ext/sources/bindings/javascript/package.json +26 -0
- data/ext/sources/bindings/javascript/whisper.js +19 -0
- data/ext/sources/build-xcframework.sh +547 -0
- data/ext/sources/cmake/DefaultTargetOptions.cmake +16 -0
- data/ext/sources/cmake/FindFFmpeg.cmake +163 -0
- data/ext/sources/cmake/build-info.cmake +60 -0
- data/ext/sources/cmake/git-vars.cmake +22 -0
- data/ext/sources/cmake/whisper-config.cmake.in +65 -0
- data/ext/sources/cmake/whisper.pc.in +10 -0
- data/ext/sources/examples/CMakeLists.txt +124 -0
- data/ext/sources/examples/addon.node/CMakeLists.txt +31 -0
- data/ext/sources/examples/addon.node/__test__/whisper.spec.js +133 -0
- data/ext/sources/examples/addon.node/addon.cpp +557 -0
- data/ext/sources/examples/addon.node/index.js +57 -0
- data/ext/sources/examples/addon.node/package.json +16 -0
- data/ext/sources/examples/addon.node/vad-example.js +132 -0
- data/ext/sources/examples/bench/CMakeLists.txt +8 -0
- data/ext/sources/examples/bench/bench.cpp +176 -0
- data/ext/sources/examples/bench.wasm/CMakeLists.txt +49 -0
- data/ext/sources/examples/bench.wasm/emscripten.cpp +87 -0
- data/ext/sources/examples/bench.wasm/index-tmpl.html +284 -0
- data/ext/sources/examples/cli/CMakeLists.txt +8 -0
- data/ext/sources/examples/cli/cli.cpp +1295 -0
- data/ext/sources/examples/coi-serviceworker.js +146 -0
- data/ext/sources/examples/command/CMakeLists.txt +10 -0
- data/ext/sources/examples/command/command.cpp +800 -0
- data/ext/sources/examples/command/commands.txt +9 -0
- data/ext/sources/examples/command.wasm/CMakeLists.txt +50 -0
- data/ext/sources/examples/command.wasm/emscripten.cpp +327 -0
- data/ext/sources/examples/command.wasm/index-tmpl.html +414 -0
- data/ext/sources/examples/common-ggml.cpp +238 -0
- data/ext/sources/examples/common-ggml.h +18 -0
- data/ext/sources/examples/common-sdl.cpp +227 -0
- data/ext/sources/examples/common-sdl.h +49 -0
- data/ext/sources/examples/common-whisper.cpp +175 -0
- data/ext/sources/examples/common-whisper.h +24 -0
- data/ext/sources/examples/common.cpp +675 -0
- data/ext/sources/examples/common.h +322 -0
- data/ext/sources/examples/deprecation-warning/CMakeLists.txt +6 -0
- data/ext/sources/examples/deprecation-warning/deprecation-warning.cpp +38 -0
- data/ext/sources/examples/ffmpeg-transcode.cpp +368 -0
- data/ext/sources/examples/generate-karaoke.sh +57 -0
- data/ext/sources/examples/grammar-parser.cpp +423 -0
- data/ext/sources/examples/grammar-parser.h +29 -0
- data/ext/sources/examples/helpers.js +191 -0
- data/ext/sources/examples/json.hpp +24596 -0
- data/ext/sources/examples/livestream.sh +112 -0
- data/ext/sources/examples/lsp/CMakeLists.txt +9 -0
- data/ext/sources/examples/lsp/lsp.cpp +469 -0
- data/ext/sources/examples/lsp/whisper.vim +362 -0
- data/ext/sources/examples/miniaudio.h +93468 -0
- data/ext/sources/examples/python/test_whisper_processor.py +7 -0
- data/ext/sources/examples/python/whisper_processor.py +54 -0
- data/ext/sources/examples/quantize/CMakeLists.txt +6 -0
- data/ext/sources/examples/quantize/quantize.cpp +226 -0
- data/ext/sources/examples/server/CMakeLists.txt +15 -0
- data/ext/sources/examples/server/bench.js +29 -0
- data/ext/sources/examples/server/httplib.h +10497 -0
- data/ext/sources/examples/server/server.cpp +1238 -0
- data/ext/sources/examples/server.py +115 -0
- data/ext/sources/examples/stb_vorbis.c +5584 -0
- data/ext/sources/examples/stream/CMakeLists.txt +10 -0
- data/ext/sources/examples/stream/stream.cpp +435 -0
- data/ext/sources/examples/stream.wasm/CMakeLists.txt +49 -0
- data/ext/sources/examples/stream.wasm/emscripten.cpp +216 -0
- data/ext/sources/examples/stream.wasm/index-tmpl.html +414 -0
- data/ext/sources/examples/sycl/CMakeLists.txt +9 -0
- data/ext/sources/examples/sycl/build.sh +22 -0
- data/ext/sources/examples/sycl/ls-sycl-device.cpp +11 -0
- data/ext/sources/examples/sycl/run-whisper.sh +17 -0
- data/ext/sources/examples/talk-llama/CMakeLists.txt +43 -0
- data/ext/sources/examples/talk-llama/eleven-labs.py +80 -0
- data/ext/sources/examples/talk-llama/llama-adapter.cpp +388 -0
- data/ext/sources/examples/talk-llama/llama-adapter.h +76 -0
- data/ext/sources/examples/talk-llama/llama-arch.cpp +1914 -0
- data/ext/sources/examples/talk-llama/llama-arch.h +464 -0
- data/ext/sources/examples/talk-llama/llama-batch.cpp +843 -0
- data/ext/sources/examples/talk-llama/llama-batch.h +147 -0
- data/ext/sources/examples/talk-llama/llama-chat.cpp +685 -0
- data/ext/sources/examples/talk-llama/llama-chat.h +59 -0
- data/ext/sources/examples/talk-llama/llama-context.cpp +2845 -0
- data/ext/sources/examples/talk-llama/llama-context.h +297 -0
- data/ext/sources/examples/talk-llama/llama-cparams.cpp +5 -0
- data/ext/sources/examples/talk-llama/llama-cparams.h +41 -0
- data/ext/sources/examples/talk-llama/llama-grammar.cpp +1229 -0
- data/ext/sources/examples/talk-llama/llama-grammar.h +173 -0
- data/ext/sources/examples/talk-llama/llama-graph.cpp +1693 -0
- data/ext/sources/examples/talk-llama/llama-graph.h +710 -0
- data/ext/sources/examples/talk-llama/llama-hparams.cpp +103 -0
- data/ext/sources/examples/talk-llama/llama-hparams.h +207 -0
- data/ext/sources/examples/talk-llama/llama-impl.cpp +167 -0
- data/ext/sources/examples/talk-llama/llama-impl.h +61 -0
- data/ext/sources/examples/talk-llama/llama-io.cpp +15 -0
- data/ext/sources/examples/talk-llama/llama-io.h +35 -0
- data/ext/sources/examples/talk-llama/llama-kv-cache-unified-iswa.cpp +279 -0
- data/ext/sources/examples/talk-llama/llama-kv-cache-unified-iswa.h +128 -0
- data/ext/sources/examples/talk-llama/llama-kv-cache-unified.cpp +1841 -0
- data/ext/sources/examples/talk-llama/llama-kv-cache-unified.h +303 -0
- data/ext/sources/examples/talk-llama/llama-kv-cache.h +44 -0
- data/ext/sources/examples/talk-llama/llama-kv-cells.h +439 -0
- data/ext/sources/examples/talk-llama/llama-memory-hybrid.cpp +246 -0
- data/ext/sources/examples/talk-llama/llama-memory-hybrid.h +138 -0
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.cpp +1125 -0
- data/ext/sources/examples/talk-llama/llama-memory-recurrent.h +183 -0
- data/ext/sources/examples/talk-llama/llama-memory.cpp +59 -0
- data/ext/sources/examples/talk-llama/llama-memory.h +116 -0
- data/ext/sources/examples/talk-llama/llama-mmap.cpp +600 -0
- data/ext/sources/examples/talk-llama/llama-mmap.h +68 -0
- data/ext/sources/examples/talk-llama/llama-model-loader.cpp +1163 -0
- data/ext/sources/examples/talk-llama/llama-model-loader.h +169 -0
- data/ext/sources/examples/talk-llama/llama-model-saver.cpp +282 -0
- data/ext/sources/examples/talk-llama/llama-model-saver.h +37 -0
- data/ext/sources/examples/talk-llama/llama-model.cpp +15114 -0
- data/ext/sources/examples/talk-llama/llama-model.h +452 -0
- data/ext/sources/examples/talk-llama/llama-quant.cpp +1049 -0
- data/ext/sources/examples/talk-llama/llama-quant.h +1 -0
- data/ext/sources/examples/talk-llama/llama-sampling.cpp +2575 -0
- data/ext/sources/examples/talk-llama/llama-sampling.h +32 -0
- data/ext/sources/examples/talk-llama/llama-vocab.cpp +3377 -0
- data/ext/sources/examples/talk-llama/llama-vocab.h +132 -0
- data/ext/sources/examples/talk-llama/llama.cpp +358 -0
- data/ext/sources/examples/talk-llama/llama.h +1484 -0
- data/ext/sources/examples/talk-llama/prompts/talk-alpaca.txt +23 -0
- data/ext/sources/examples/talk-llama/speak +40 -0
- data/ext/sources/examples/talk-llama/speak.bat +1 -0
- data/ext/sources/examples/talk-llama/speak.ps1 +14 -0
- data/ext/sources/examples/talk-llama/talk-llama.cpp +810 -0
- data/ext/sources/examples/talk-llama/unicode-data.cpp +7034 -0
- data/ext/sources/examples/talk-llama/unicode-data.h +20 -0
- data/ext/sources/examples/talk-llama/unicode.cpp +854 -0
- data/ext/sources/examples/talk-llama/unicode.h +66 -0
- data/ext/sources/examples/vad-speech-segments/CMakeLists.txt +8 -0
- data/ext/sources/examples/vad-speech-segments/speech.cpp +149 -0
- data/ext/sources/examples/wchess/CMakeLists.txt +10 -0
- data/ext/sources/examples/wchess/libwchess/CMakeLists.txt +19 -0
- data/ext/sources/examples/wchess/libwchess/Chessboard.cpp +803 -0
- data/ext/sources/examples/wchess/libwchess/Chessboard.h +33 -0
- data/ext/sources/examples/wchess/libwchess/WChess.cpp +193 -0
- data/ext/sources/examples/wchess/libwchess/WChess.h +63 -0
- data/ext/sources/examples/wchess/libwchess/test-chessboard.cpp +117 -0
- data/ext/sources/examples/wchess/wchess.cmd/CMakeLists.txt +8 -0
- data/ext/sources/examples/wchess/wchess.cmd/wchess.cmd.cpp +251 -0
- data/ext/sources/examples/whisper.wasm/CMakeLists.txt +50 -0
- data/ext/sources/examples/whisper.wasm/emscripten.cpp +118 -0
- data/ext/sources/examples/whisper.wasm/index-tmpl.html +658 -0
- data/ext/sources/ggml/CMakeLists.txt +435 -0
- data/ext/sources/ggml/cmake/BuildTypes.cmake +54 -0
- data/ext/sources/ggml/cmake/GitVars.cmake +22 -0
- data/ext/sources/ggml/cmake/common.cmake +50 -0
- data/ext/sources/ggml/cmake/ggml-config.cmake.in +152 -0
- data/ext/{ggml → sources/ggml}/include/ggml-alloc.h +1 -1
- data/ext/{ggml → sources/ggml}/include/ggml-backend.h +10 -8
- data/ext/{ggml → sources/ggml}/include/ggml-cpp.h +2 -1
- data/ext/{ggml → sources/ggml}/include/ggml-cpu.h +11 -1
- data/ext/{ggml → sources/ggml}/include/ggml-metal.h +1 -1
- data/ext/{ggml → sources/ggml}/include/ggml-opt.h +49 -28
- data/ext/{ggml → sources/ggml}/include/ggml-rpc.h +6 -1
- data/ext/{ggml → sources/ggml}/include/ggml-vulkan.h +0 -2
- data/ext/{ggml → sources/ggml}/include/ggml.h +325 -269
- data/ext/sources/ggml/include/gguf.h +202 -0
- data/ext/sources/ggml/src/CMakeLists.txt +404 -0
- data/ext/{ggml → sources/ggml}/src/ggml-alloc.c +34 -29
- data/ext/sources/ggml/src/ggml-amx/CMakeLists.txt +107 -0
- data/ext/{ggml → sources/ggml}/src/ggml-backend-impl.h +1 -2
- data/ext/{ggml → sources/ggml}/src/ggml-backend-reg.cpp +92 -53
- data/ext/{ggml → sources/ggml}/src/ggml-backend.cpp +69 -34
- data/ext/sources/ggml/src/ggml-blas/CMakeLists.txt +87 -0
- data/ext/sources/ggml/src/ggml-cann/CMakeLists.txt +75 -0
- data/ext/sources/ggml/src/ggml-cann/Doxyfile +2579 -0
- data/ext/{ggml → sources/ggml}/src/ggml-cann/acl_tensor.cpp +10 -4
- data/ext/{ggml → sources/ggml}/src/ggml-cann/acl_tensor.h +5 -5
- data/ext/{ggml → sources/ggml}/src/ggml-cann/aclnn_ops.cpp +1272 -1506
- data/ext/sources/ggml/src/ggml-cann/aclnn_ops.h +1125 -0
- data/ext/{ggml → sources/ggml}/src/ggml-cann/common.h +140 -1
- data/ext/{ggml → sources/ggml}/src/ggml-cann/ggml-cann.cpp +588 -146
- data/ext/sources/ggml/src/ggml-cann/kernels/CMakeLists.txt +30 -0
- data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/dup.cpp +3 -5
- data/ext/{ggml → sources/ggml}/src/ggml-common.h +16 -8
- data/ext/sources/ggml/src/ggml-cpu/CMakeLists.txt +597 -0
- data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/amx.cpp +3 -2
- data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/mmq.cpp +11 -10
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp +94 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/quants.c +4114 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/arm/repack.cpp +2163 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/loongarch/quants.c +2639 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/powerpc/cpu-feats.cpp +82 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/powerpc/quants.c +2732 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/quants.c +2069 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/riscv/repack.cpp +397 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/s390/quants.c +1300 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/wasm/quants.c +1481 -0
- data/ext/{ggml/src/ggml-cpu/cpu-feats-x86.cpp → sources/ggml/src/ggml-cpu/arch/x86/cpu-feats.cpp} +5 -1
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/quants.c +4311 -0
- data/ext/sources/ggml/src/ggml-cpu/arch/x86/repack.cpp +3285 -0
- data/ext/sources/ggml/src/ggml-cpu/arch-fallback.h +184 -0
- data/ext/sources/ggml/src/ggml-cpu/binary-ops.cpp +158 -0
- data/ext/sources/ggml/src/ggml-cpu/binary-ops.h +16 -0
- data/ext/sources/ggml/src/ggml-cpu/cmake/FindSIMD.cmake +100 -0
- data/ext/sources/ggml/src/ggml-cpu/common.h +73 -0
- data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu-impl.h +172 -41
- data/ext/sources/ggml/src/ggml-cpu/ggml-cpu.c +3551 -0
- data/ext/{ggml → sources/ggml}/src/ggml-cpu/ggml-cpu.cpp +78 -25
- data/ext/{ggml/src/ggml-cpu/ggml-cpu-hbm.cpp → sources/ggml/src/ggml-cpu/hbm.cpp} +1 -1
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.cpp +337 -0
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kernels.h +95 -0
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +482 -0
- data/ext/sources/ggml/src/ggml-cpu/kleidiai/kleidiai.h +17 -0
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.cpp +3594 -0
- data/ext/sources/ggml/src/ggml-cpu/llamafile/sgemm.h +19 -0
- data/ext/sources/ggml/src/ggml-cpu/ops.cpp +9786 -0
- data/ext/sources/ggml/src/ggml-cpu/ops.h +118 -0
- data/ext/sources/ggml/src/ggml-cpu/quants.c +1158 -0
- data/ext/{ggml/src/ggml-cpu/ggml-cpu-quants.h → sources/ggml/src/ggml-cpu/quants.h} +26 -0
- data/ext/sources/ggml/src/ggml-cpu/repack.cpp +1571 -0
- data/ext/sources/ggml/src/ggml-cpu/repack.h +98 -0
- data/ext/sources/ggml/src/ggml-cpu/simd-mappings.h +1184 -0
- data/ext/{ggml/src/ggml-cpu/ggml-cpu-traits.cpp → sources/ggml/src/ggml-cpu/traits.cpp} +1 -1
- data/ext/sources/ggml/src/ggml-cpu/unary-ops.cpp +186 -0
- data/ext/sources/ggml/src/ggml-cpu/unary-ops.h +28 -0
- data/ext/sources/ggml/src/ggml-cpu/vec.cpp +345 -0
- data/ext/sources/ggml/src/ggml-cpu/vec.h +1027 -0
- data/ext/sources/ggml/src/ggml-cuda/CMakeLists.txt +184 -0
- data/ext/sources/ggml/src/ggml-cuda/acc.cu +61 -0
- data/ext/sources/ggml/src/ggml-cuda/acc.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/arange.cu +34 -0
- data/ext/sources/ggml/src/ggml-cuda/arange.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/argmax.cu +91 -0
- data/ext/sources/ggml/src/ggml-cuda/argmax.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/argsort.cu +104 -0
- data/ext/sources/ggml/src/ggml-cuda/argsort.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/binbcast.cu +363 -0
- data/ext/sources/ggml/src/ggml-cuda/binbcast.cuh +9 -0
- data/ext/sources/ggml/src/ggml-cuda/clamp.cu +45 -0
- data/ext/sources/ggml/src/ggml-cuda/clamp.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/common.cuh +851 -0
- data/ext/sources/ggml/src/ggml-cuda/concat.cu +221 -0
- data/ext/sources/ggml/src/ggml-cuda/concat.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/conv-transpose-1d.cu +89 -0
- data/ext/sources/ggml/src/ggml-cuda/conv-transpose-1d.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/conv2d-dw.cu +161 -0
- data/ext/sources/ggml/src/ggml-cuda/conv2d-dw.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/conv2d-transpose.cu +91 -0
- data/ext/sources/ggml/src/ggml-cuda/conv2d-transpose.cuh +4 -0
- data/ext/sources/ggml/src/ggml-cuda/convert.cu +752 -0
- data/ext/sources/ggml/src/ggml-cuda/convert.cuh +31 -0
- data/ext/sources/ggml/src/ggml-cuda/count-equal.cu +64 -0
- data/ext/sources/ggml/src/ggml-cuda/count-equal.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/cp-async.cuh +57 -0
- data/ext/sources/ggml/src/ggml-cuda/cpy.cu +705 -0
- data/ext/sources/ggml/src/ggml-cuda/cpy.cuh +11 -0
- data/ext/sources/ggml/src/ggml-cuda/cross-entropy-loss.cu +189 -0
- data/ext/sources/ggml/src/ggml-cuda/cross-entropy-loss.cuh +7 -0
- data/ext/sources/ggml/src/ggml-cuda/dequantize.cuh +103 -0
- data/ext/sources/ggml/src/ggml-cuda/diagmask.cu +40 -0
- data/ext/sources/ggml/src/ggml-cuda/diagmask.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-common.cuh +881 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-mma-f16.cuh +1474 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cu +357 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f16.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cu +365 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-tile-f32.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f16.cuh +482 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-vec-f32.cuh +472 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cu +638 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn.cu +346 -0
- data/ext/sources/ggml/src/ggml-cuda/fattn.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/getrows.cu +275 -0
- data/ext/sources/ggml/src/ggml-cuda/getrows.cuh +15 -0
- data/ext/sources/ggml/src/ggml-cuda/ggml-cuda.cu +3647 -0
- data/ext/sources/ggml/src/ggml-cuda/gla.cu +93 -0
- data/ext/sources/ggml/src/ggml-cuda/gla.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/im2col.cu +103 -0
- data/ext/sources/ggml/src/ggml-cuda/im2col.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/mean.cu +19 -0
- data/ext/sources/ggml/src/ggml-cuda/mean.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/mma.cuh +396 -0
- data/ext/sources/ggml/src/ggml-cuda/mmq.cu +324 -0
- data/ext/sources/ggml/src/ggml-cuda/mmq.cuh +3217 -0
- data/ext/sources/ggml/src/ggml-cuda/mmv.cu +506 -0
- data/ext/sources/ggml/src/ggml-cuda/mmv.cuh +11 -0
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cu +595 -0
- data/ext/sources/ggml/src/ggml-cuda/mmvq.cuh +12 -0
- data/ext/sources/ggml/src/ggml-cuda/norm.cu +458 -0
- data/ext/sources/ggml/src/ggml-cuda/norm.cuh +11 -0
- data/ext/sources/ggml/src/ggml-cuda/opt-step-adamw.cu +78 -0
- data/ext/sources/ggml/src/ggml-cuda/opt-step-adamw.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/out-prod.cu +68 -0
- data/ext/sources/ggml/src/ggml-cuda/out-prod.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/pad.cu +49 -0
- data/ext/sources/ggml/src/ggml-cuda/pad.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/pool2d.cu +94 -0
- data/ext/sources/ggml/src/ggml-cuda/pool2d.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/quantize.cu +190 -0
- data/ext/sources/ggml/src/ggml-cuda/quantize.cuh +27 -0
- data/ext/sources/ggml/src/ggml-cuda/rope.cu +456 -0
- data/ext/sources/ggml/src/ggml-cuda/rope.cuh +7 -0
- data/ext/sources/ggml/src/ggml-cuda/scale.cu +31 -0
- data/ext/sources/ggml/src/ggml-cuda/scale.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/softmax.cu +283 -0
- data/ext/sources/ggml/src/ggml-cuda/softmax.cuh +7 -0
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cu +148 -0
- data/ext/sources/ggml/src/ggml-cuda/ssm-conv.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cu +155 -0
- data/ext/sources/ggml/src/ggml-cuda/ssm-scan.cuh +3 -0
- data/ext/sources/ggml/src/ggml-cuda/sum.cu +45 -0
- data/ext/sources/ggml/src/ggml-cuda/sum.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/sumrows.cu +26 -0
- data/ext/sources/ggml/src/ggml-cuda/sumrows.cuh +4 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_1-ncols2_8.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_1.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_2.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_16-ncols2_4.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_4.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_2-ncols2_8.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_1.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_32-ncols2_2.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_2.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_4.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_4-ncols2_8.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_64-ncols2_1.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_1.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_2.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_4.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-ncols1_8-ncols2_8.cu +10 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +78 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_s.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_s.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_s.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q2_k.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q3_k.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_k.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_1.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_k.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q6_k.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/template-instances/mmq-instance-q8_0.cu +5 -0
- data/ext/sources/ggml/src/ggml-cuda/tsembd.cu +47 -0
- data/ext/sources/ggml/src/ggml-cuda/tsembd.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/unary.cu +378 -0
- data/ext/sources/ggml/src/ggml-cuda/unary.cuh +66 -0
- data/ext/sources/ggml/src/ggml-cuda/upscale.cu +51 -0
- data/ext/sources/ggml/src/ggml-cuda/upscale.cuh +5 -0
- data/ext/sources/ggml/src/ggml-cuda/vecdotq.cuh +1135 -0
- data/ext/{ggml → sources/ggml}/src/ggml-cuda/vendors/cuda.h +1 -0
- data/ext/{ggml → sources/ggml}/src/ggml-cuda/vendors/hip.h +57 -0
- data/ext/{ggml → sources/ggml}/src/ggml-cuda/vendors/musa.h +7 -1
- data/ext/sources/ggml/src/ggml-cuda/wkv.cu +199 -0
- data/ext/sources/ggml/src/ggml-cuda/wkv.cuh +7 -0
- data/ext/sources/ggml/src/ggml-hip/CMakeLists.txt +135 -0
- data/ext/{ggml → sources/ggml}/src/ggml-impl.h +147 -158
- data/ext/sources/ggml/src/ggml-kompute/CMakeLists.txt +166 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/common.comp +112 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_add.comp +58 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_addrow.comp +25 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f16.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f16_f32.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f16.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_cpy_f32_f32.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_diagmask.comp +30 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_gelu.comp +22 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows.comp +17 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f16.comp +31 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_f32.comp +31 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_0.comp +38 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q4_1.comp +39 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_getrows_q6_k.comp +44 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp +69 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_mat_f32.comp +51 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_0.comp +33 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_1.comp +35 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp +140 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp +106 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q8_0.comp +73 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp +28 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_norm.comp +84 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_relu.comp +21 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rmsnorm.comp +53 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f16.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f32.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f16.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f32.comp +52 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale.comp +19 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_scale_8.comp +23 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_silu.comp +22 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp +72 -0
- data/ext/sources/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp +71 -0
- data/ext/sources/ggml/src/ggml-metal/CMakeLists.txt +121 -0
- data/ext/sources/ggml/src/ggml-metal/ggml-metal-impl.h +649 -0
- data/ext/{ggml → sources/ggml}/src/ggml-metal/ggml-metal.m +2504 -1108
- data/ext/{ggml → sources/ggml}/src/ggml-metal/ggml-metal.metal +2102 -1463
- data/ext/sources/ggml/src/ggml-musa/CMakeLists.txt +113 -0
- data/ext/sources/ggml/src/ggml-musa/mudnn.cu +112 -0
- data/ext/sources/ggml/src/ggml-musa/mudnn.cuh +12 -0
- data/ext/sources/ggml/src/ggml-opencl/CMakeLists.txt +110 -0
- data/ext/sources/ggml/src/ggml-opencl/ggml-opencl.cpp +6494 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/add.cl +83 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/argsort.cl +86 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/clamp.cl +20 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/concat.cl +109 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cpy.cl +184 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/cvt.cl +118 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/diag_mask_inf.cl +58 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/div.cl +72 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/embed_kernel.py +26 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gelu.cl +62 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle.cl +268 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/gemv_noshuffle_general.cl +274 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/get_rows.cl +163 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/glu.cl +201 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/group_norm.cl +72 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f16.cl +57 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/im2col_f32.cl +57 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul.cl +79 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mat_Ab_Bi_8x4.cl +139 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f16.cl +118 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32.cl +118 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_1row.cl +94 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f16_f32_l4.cl +84 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_f32_f32.cl +118 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_id_q4_0_f32_8x_flat.cl +283 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32.cl +192 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_16x_flat.cl +307 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_1d_8x_flat.cl +265 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_8x_flat.cl +272 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q4_0_f32_v.cl +254 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/mul_mv_q6_k.cl +190 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/norm.cl +81 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/pad.cl +30 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/relu.cl +16 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/repeat.cl +39 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/rms_norm.cl +96 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/rope.cl +721 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/scale.cl +16 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sigmoid.cl +29 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/silu.cl +30 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f16.cl +87 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_4_f32.cl +87 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f16.cl +86 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/softmax_f32.cl +86 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sub.cl +72 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/sum_rows.cl +39 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/tanh.cl +63 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/transpose.cl +84 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/tsembd.cl +48 -0
- data/ext/sources/ggml/src/ggml-opencl/kernels/upscale.cl +121 -0
- data/ext/{ggml → sources/ggml}/src/ggml-opt.cpp +373 -190
- data/ext/{ggml → sources/ggml}/src/ggml-quants.c +120 -128
- data/ext/sources/ggml/src/ggml-rpc/CMakeLists.txt +9 -0
- data/ext/{ggml → sources/ggml}/src/ggml-rpc/ggml-rpc.cpp +494 -84
- data/ext/sources/ggml/src/ggml-sycl/CMakeLists.txt +189 -0
- data/ext/sources/ggml/src/ggml-sycl/backend.hpp +37 -0
- data/ext/sources/ggml/src/ggml-sycl/binbcast.cpp +344 -0
- data/ext/sources/ggml/src/ggml-sycl/binbcast.hpp +39 -0
- data/ext/{ggml → sources/ggml}/src/ggml-sycl/common.cpp +20 -32
- data/ext/sources/ggml/src/ggml-sycl/common.hpp +561 -0
- data/ext/{ggml → sources/ggml}/src/ggml-sycl/concat.cpp +56 -70
- data/ext/sources/ggml/src/ggml-sycl/concat.hpp +20 -0
- data/ext/{ggml → sources/ggml}/src/ggml-sycl/conv.cpp +8 -12
- data/ext/sources/ggml/src/ggml-sycl/conv.hpp +20 -0
- data/ext/sources/ggml/src/ggml-sycl/convert.cpp +575 -0
- data/ext/sources/ggml/src/ggml-sycl/convert.hpp +34 -0
- data/ext/sources/ggml/src/ggml-sycl/cpy.cpp +839 -0
- data/ext/sources/ggml/src/ggml-sycl/cpy.hpp +11 -0
- data/ext/sources/ggml/src/ggml-sycl/dequantize.hpp +823 -0
- data/ext/{ggml → sources/ggml}/src/ggml-sycl/dmmv.cpp +188 -67
- data/ext/sources/ggml/src/ggml-sycl/dmmv.hpp +27 -0
- data/ext/sources/ggml/src/ggml-sycl/dpct/helper.hpp +2987 -0
- data/ext/sources/ggml/src/ggml-sycl/element_wise.cpp +1120 -0
- data/ext/sources/ggml/src/ggml-sycl/element_wise.hpp +84 -0
- data/ext/sources/ggml/src/ggml-sycl/gemm.hpp +102 -0
- data/ext/sources/ggml/src/ggml-sycl/getrows.cpp +212 -0
- data/ext/sources/ggml/src/ggml-sycl/getrows.hpp +20 -0
- data/ext/{ggml → sources/ggml}/src/ggml-sycl/ggml-sycl.cpp +1197 -1295
- data/ext/sources/ggml/src/ggml-sycl/gla.cpp +106 -0
- data/ext/sources/ggml/src/ggml-sycl/gla.hpp +8 -0
- data/ext/sources/ggml/src/ggml-sycl/im2col.cpp +136 -0
- data/ext/sources/ggml/src/ggml-sycl/im2col.hpp +21 -0
- data/ext/{ggml → sources/ggml}/src/ggml-sycl/mmq.cpp +60 -81
- data/ext/sources/ggml/src/ggml-sycl/mmq.hpp +33 -0
- data/ext/sources/ggml/src/ggml-sycl/mmvq.cpp +1065 -0
- data/ext/sources/ggml/src/ggml-sycl/mmvq.hpp +27 -0
- data/ext/sources/ggml/src/ggml-sycl/norm.cpp +482 -0
- data/ext/sources/ggml/src/ggml-sycl/norm.hpp +26 -0
- data/ext/{ggml → sources/ggml}/src/ggml-sycl/outprod.cpp +8 -17
- data/ext/sources/ggml/src/ggml-sycl/outprod.hpp +10 -0
- data/ext/sources/ggml/src/ggml-sycl/presets.hpp +74 -0
- data/ext/sources/ggml/src/ggml-sycl/quants.hpp +111 -0
- data/ext/sources/ggml/src/ggml-sycl/rope.cpp +472 -0
- data/ext/sources/ggml/src/ggml-sycl/rope.hpp +20 -0
- data/ext/{ggml → sources/ggml}/src/ggml-sycl/softmax.cpp +38 -28
- data/ext/sources/ggml/src/ggml-sycl/softmax.hpp +20 -0
- data/ext/sources/ggml/src/ggml-sycl/sycl_hw.cpp +15 -0
- data/ext/sources/ggml/src/ggml-sycl/sycl_hw.hpp +26 -0
- data/ext/{ggml → sources/ggml}/src/ggml-sycl/tsembd.cpp +6 -11
- data/ext/sources/ggml/src/ggml-sycl/tsembd.hpp +20 -0
- data/ext/sources/ggml/src/ggml-sycl/vecdotq.hpp +1307 -0
- data/ext/sources/ggml/src/ggml-sycl/wkv.cpp +289 -0
- data/ext/sources/ggml/src/ggml-sycl/wkv.hpp +10 -0
- data/ext/sources/ggml/src/ggml-vulkan/CMakeLists.txt +200 -0
- data/ext/sources/ggml/src/ggml-vulkan/cmake/host-toolchain.cmake.in +15 -0
- data/ext/{ggml → sources/ggml}/src/ggml-vulkan/ggml-vulkan.cpp +3822 -1335
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +31 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/acc.comp +29 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/add.comp +29 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argmax.comp +51 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/argsort.comp +69 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/clamp.comp +17 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/concat.comp +41 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/contig_copy.comp +49 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp +105 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/conv_transpose_1d.comp +98 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy.comp +23 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +51 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +242 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/cos.comp +17 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/count_equal.comp +31 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_f32.comp +20 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp +462 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +699 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_head.comp +13 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_m.comp +42 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq1_s.comp +35 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp +44 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xs.comp +43 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp +48 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp +39 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp +49 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp +32 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_xs.comp +34 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q2_k.comp +34 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q3_k.comp +42 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_0.comp +30 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_1.comp +32 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +68 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_0.comp +34 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_1.comp +35 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +70 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q6_k.comp +33 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q8_0.comp +31 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/diag_mask_inf.comp +34 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/div.comp +27 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +337 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.comp +162 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +360 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +267 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp +59 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/geglu.comp +13 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu.comp +25 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/gelu_quick.comp +23 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_binary_head.comp +64 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_head.comp +9 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp +76 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows.comp +33 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +41 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/glu_head.comp +15 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/glu_main.comp +29 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp +66 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +100 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/l2_norm.comp +41 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/leaky_relu.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul.comp +27 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp +48 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +169 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp +118 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_m.comp +82 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq1_s.comp +79 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_s.comp +90 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xs.comp +87 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq2_xxs.comp +87 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_s.comp +90 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iq3_xxs.comp +88 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +118 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp +154 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +130 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +132 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +136 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +167 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +130 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +868 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +441 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq.comp +442 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.comp +99 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/norm.comp +44 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/opt_step_adamw.comp +42 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +28 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/pool2d.comp +74 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/quantize_q8_1.comp +77 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/reglu.comp +9 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/relu.comp +21 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat.comp +26 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/repeat_back.comp +37 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +61 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm_back.comp +55 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp +58 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_multi.comp +60 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_neox.comp +43 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_norm.comp +43 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/rope_vision.comp +47 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +24 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sigmoid.comp +20 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu.comp +22 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/silu_back.comp +26 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sin.comp +17 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +173 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/soft_max_back.comp +50 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/square.comp +17 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sub.comp +29 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/sum_rows.comp +37 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/swiglu.comp +9 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp +20 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_bfloat16_support.comp +7 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat2_support.comp +7 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp +7 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/test_integer_dot_support.comp +7 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/timestep_embedding.comp +41 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/types.comp +1373 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +36 -0
- data/ext/{ggml → sources/ggml}/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +203 -36
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/wkv6.comp +87 -0
- data/ext/sources/ggml/src/ggml-vulkan/vulkan-shaders/wkv7.comp +91 -0
- data/ext/{ggml → sources/ggml}/src/ggml.c +918 -1782
- data/ext/sources/ggml/src/ggml.cpp +26 -0
- data/ext/sources/ggml/src/gguf.cpp +1351 -0
- data/ext/{include → sources/include}/whisper.h +70 -2
- data/ext/sources/src/CMakeLists.txt +145 -0
- data/ext/sources/src/coreml/whisper-compat.h +10 -0
- data/ext/sources/src/coreml/whisper-compat.m +35 -0
- data/ext/{src → sources/src}/coreml/whisper-decoder-impl.h +27 -15
- data/ext/{src → sources/src}/coreml/whisper-decoder-impl.m +36 -10
- data/ext/{src → sources/src}/coreml/whisper-encoder-impl.h +21 -9
- data/ext/{src → sources/src}/coreml/whisper-encoder-impl.m +29 -3
- data/ext/sources/src/coreml/whisper-encoder.mm +73 -0
- data/ext/sources/src/whisper-arch.h +197 -0
- data/ext/{src → sources/src}/whisper.cpp +1966 -386
- data/ext/sources/tests/CMakeLists.txt +105 -0
- data/ext/sources/tests/earnings21/eval.mk +58 -0
- data/ext/sources/tests/earnings21/eval.py +68 -0
- data/ext/sources/tests/earnings21/normalizers/__init__.py +2 -0
- data/ext/sources/tests/earnings21/normalizers/basic.py +80 -0
- data/ext/sources/tests/earnings21/normalizers/english.json +1741 -0
- data/ext/sources/tests/earnings21/normalizers/english.py +550 -0
- data/ext/sources/tests/earnings21/requirements.txt +6 -0
- data/ext/sources/tests/en-0-ref.txt +1 -0
- data/ext/sources/tests/en-1-ref.txt +1 -0
- data/ext/sources/tests/en-2-ref.txt +1 -0
- data/ext/sources/tests/es-0-ref.txt +1 -0
- data/ext/sources/tests/librispeech/eval.mk +39 -0
- data/ext/sources/tests/librispeech/eval.py +47 -0
- data/ext/sources/tests/librispeech/normalizers/__init__.py +2 -0
- data/ext/sources/tests/librispeech/normalizers/basic.py +80 -0
- data/ext/sources/tests/librispeech/normalizers/english.json +1741 -0
- data/ext/sources/tests/librispeech/normalizers/english.py +550 -0
- data/ext/sources/tests/librispeech/requirements.txt +6 -0
- data/ext/sources/tests/run-tests.sh +130 -0
- data/ext/sources/tests/test-c.c +3 -0
- data/ext/sources/tests/test-vad-full.cpp +54 -0
- data/ext/sources/tests/test-vad.cpp +83 -0
- data/ext/sources/tests/test-whisper.js +58 -0
- data/extsources.rb +39 -5
- data/lib/whisper/context.rb +15 -0
- data/lib/whisper/model/uri.rb +202 -126
- data/lib/whisper/segment.rb +58 -0
- data/sig/whisper.rbs +510 -0
- data/test/helper.rb +24 -0
- data/{tests → test}/test_callback.rb +45 -3
- data/{tests → test}/test_error.rb +2 -2
- data/{tests → test}/test_model.rb +47 -0
- data/test/test_package.rb +51 -0
- data/test/test_params.rb +297 -0
- data/test/test_segment.rb +146 -0
- data/test/test_vad.rb +19 -0
- data/test/test_vad_params.rb +103 -0
- data/{tests → test}/test_whisper.rb +106 -36
- data/whispercpp.gemspec +5 -5
- metadata +837 -134
- data/ext/cpu.mk +0 -9
- data/ext/examples/dr_wav.h +0 -8815
- data/ext/ggml/src/ggml-cann/aclnn_ops.h +0 -592
- data/ext/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +0 -4262
- data/ext/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +0 -8
- data/ext/ggml/src/ggml-cpu/ggml-cpu-quants.c +0 -10835
- data/ext/ggml/src/ggml-cpu/ggml-cpu.c +0 -14123
- data/ext/ggml/src/ggml-cpu/llamafile/sgemm.cpp +0 -1884
- data/ext/ggml/src/ggml-cpu/llamafile/sgemm.h +0 -14
- data/ext/ggml/src/ggml-metal/ggml-metal-impl.h +0 -288
- data/ext/ggml/src/ggml-sycl/convert.cpp +0 -547
- data/ext/ggml/src/ggml-sycl/element_wise.cpp +0 -1030
- data/ext/ggml/src/ggml-sycl/im2col.cpp +0 -126
- data/ext/ggml/src/ggml-sycl/mmvq.cpp +0 -1015
- data/ext/ggml/src/ggml-sycl/norm.cpp +0 -378
- data/ext/ggml/src/ggml-sycl/rope.cpp +0 -276
- data/ext/ggml/src/ggml-sycl/wkv6.cpp +0 -141
- data/ext/metal-embed.mk +0 -17
- data/ext/metal.mk +0 -6
- data/ext/ruby_whisper.cpp +0 -1909
- data/ext/scripts/get-flags.mk +0 -38
- data/lib/whisper.rb +0 -2
- data/tests/helper.rb +0 -7
- data/tests/test_package.rb +0 -31
- data/tests/test_params.rb +0 -160
- data/tests/test_segment.rb +0 -83
- /data/ext/{ggml → sources/ggml}/include/ggml-blas.h +0 -0
- /data/ext/{ggml → sources/ggml}/include/ggml-cann.h +0 -0
- /data/ext/{ggml → sources/ggml}/include/ggml-cuda.h +0 -0
- /data/ext/{ggml → sources/ggml}/include/ggml-kompute.h +0 -0
- /data/ext/{ggml → sources/ggml}/include/ggml-opencl.h +0 -0
- /data/ext/{ggml → sources/ggml}/include/ggml-sycl.h +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-amx/common.h +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-amx/ggml-amx.cpp +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-amx/mmq.cpp +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-amx/mmq.h +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-blas/ggml-blas.cpp +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/ascendc_kernels.h +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_f16.cpp +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_f32.cpp +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_q4_0.cpp +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/get_row_q8_0.cpp +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/amx.h +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/common.h +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-cpu/amx/mmq.h +0 -0
- /data/ext/{ggml/src/ggml-cpu/ggml-cpu-hbm.h → sources/ggml/src/ggml-cpu/hbm.h} +0 -0
- /data/ext/{ggml/src/ggml-cpu/ggml-cpu-traits.h → sources/ggml/src/ggml-cpu/traits.h} +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-kompute/ggml-kompute.cpp +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-quants.h +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-threading.cpp +0 -0
- /data/ext/{ggml → sources/ggml}/src/ggml-threading.h +0 -0
- /data/ext/{src → sources/src}/coreml/whisper-encoder.h +0 -0
- /data/ext/{src → sources/src}/openvino/whisper-openvino-encoder.cpp +0 -0
- /data/ext/{src → sources/src}/openvino/whisper-openvino-encoder.h +0 -0
- /data/{tests → test}/jfk_reader/.gitignore +0 -0
- /data/{tests → test}/jfk_reader/extconf.rb +0 -0
- /data/{tests → test}/jfk_reader/jfk_reader.c +0 -0
@@ -0,0 +1,3594 @@
+// Copyright 2024 Mozilla Foundation
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+// BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+//
+//                   _   _          ___ _      _   ___
+//                  | |_(_)_ _ _  _| _ ) |    /_\ / __|
+//                  |  _| | ' \ || | _ \ |__ / _ \\__ \.
+//                   \__|_|_||_\_, |___/____/_/ \_\___/
+//                             |__/
+//
+//                    BASIC LINEAR ALGEBRA SUBPROGRAMS
+//
+//
+// This file implements multithreaded CPU matrix multiplication for the
+// common contiguous use case C = Aᵀ * B. These kernels are designed to
+// have excellent performance[1] for matrices that fit in the CPU cache
+// without imposing any overhead such as cache filling or malloc calls.
+//
+// This implementation does not guarantee any upper bound with rounding
+// errors, which grow along with k. Our goal is to maximally exploit the
+// hardware for performance, and then use whatever resources remain for
+// improving numerical accuracy.
+//
+// [1] J. Tunney, ‘LLaMA Now Goes Faster on CPUs’, Mar. 2024. [Online].
+//     Available: https://justine.lol/matmul/. [Accessed: 29-Mar-2024].
+
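For orientation, a scalar sketch (not part of the packaged sources) of the layout convention the header comment describes: the stored `A` holds m rows of length k with row stride `lda` (each stored row is a column of the mathematical A), `B` holds n rows of length k with stride `ldb`, and `C(i, j)` is the dot product of stored row i of A and stored row j of B. The helper name `sgemm_ref` is hypothetical.

```cpp
#include <cstdint>

// Hypothetical reference kernel for C = Aᵀ * B with the same strides the
// classes below use: A[lda*i + l], B[ldb*j + l], C[ldc*j + i].
void sgemm_ref(int64_t m, int64_t n, int64_t k,
               const float *A, int64_t lda,
               const float *B, int64_t ldb,
               float *C, int64_t ldc) {
    for (int64_t j = 0; j < n; ++j)
        for (int64_t i = 0; i < m; ++i) {
            float sum = 0.0f;
            for (int64_t l = 0; l < k; ++l)
                sum += A[lda * i + l] * B[ldb * j + l];  // dot of stored rows i and j
            C[ldc * j + i] = sum;
        }
}
```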
+#if defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wpedantic"
+#pragma GCC diagnostic ignored "-Wignored-attributes"
+#endif
+
+#include "sgemm.h"
+#include "ggml-impl.h"
+#include "ggml-cpu-impl.h"
+#include "ggml-quants.h"
+#include "simd-mappings.h"
+
+#include <array>
+#include <type_traits>
+
+#ifdef _MSC_VER
+#define NOINLINE __declspec(noinline)
+#else
+#define NOINLINE __attribute__((__noinline__))
+#endif
+
+#if defined(__ARM_NEON) || defined(__AVX512F__) || defined(__VXE__) || defined(__VXE2__)
+#define VECTOR_REGISTERS 32
+#else
+#define VECTOR_REGISTERS 16
+#endif
+
+#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
+
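A note on the macro above (an editorial sketch, not part of the diff): `MM256_SET_M128I(a, b)` builds a 256-bit vector whose low 128 bits are `b` and whose high 128 bits are `a`, giving the same result as the `_mm256_set_m128i` intrinsic, which some older compilers lack.

```cpp
#include <immintrin.h>

// Equivalent expansion of MM256_SET_M128I(hi, lo): place lo in the low lane,
// then insert hi into the upper 128-bit lane.
__m256i combine_example(__m128i hi, __m128i lo) {
    return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
}
```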
+namespace {
+
+inline float unhalf(ggml_fp16_t d) {
+    return GGML_CPU_FP16_TO_FP32(d);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// VECTORIZED ARITHMETIC OPERATIONS
+
+#if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+inline __m128 add(__m128 x, __m128 y) { return _mm_add_ps(x, y); }
+inline __m128 sub(__m128 x, __m128 y) { return _mm_sub_ps(x, y); }
+inline __m128 mul(__m128 x, __m128 y) { return _mm_mul_ps(x, y); }
+#endif // __SSE__
+
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+inline __m256 add(__m256 x, __m256 y) { return _mm256_add_ps(x, y); }
+inline __m256 sub(__m256 x, __m256 y) { return _mm256_sub_ps(x, y); }
+inline __m256 mul(__m256 x, __m256 y) { return _mm256_mul_ps(x, y); }
+#endif // __AVX__
+
+#if defined(__AVX512F__)
+inline __m512 add(__m512 x, __m512 y) { return _mm512_add_ps(x, y); }
+inline __m512 sub(__m512 x, __m512 y) { return _mm512_sub_ps(x, y); }
+inline __m512 mul(__m512 x, __m512 y) { return _mm512_mul_ps(x, y); }
+#endif // __AVX512F__
+
+#if defined(__ARM_NEON)
+inline float32x4_t add(float32x4_t x, float32x4_t y) { return vaddq_f32(x, y); }
+inline float32x4_t sub(float32x4_t x, float32x4_t y) { return vsubq_f32(x, y); }
+inline float32x4_t mul(float32x4_t x, float32x4_t y) { return vmulq_f32(x, y); }
+#endif // __ARM_NEON
+
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+inline float16x8_t add(float16x8_t x, float16x8_t y) { return vaddq_f16(x, y); }
+inline float16x8_t sub(float16x8_t x, float16x8_t y) { return vsubq_f16(x, y); }
+inline float16x8_t mul(float16x8_t x, float16x8_t y) { return vmulq_f16(x, y); }
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#if defined(__VXE__) || defined(__VXE2__)
+inline float32x4_t add(float32x4_t x, float32x4_t y) { return vec_add(x, y); }
+inline float32x4_t sub(float32x4_t x, float32x4_t y) { return vec_sub(x, y); }
+inline float32x4_t mul(float32x4_t x, float32x4_t y) { return vec_mul(x, y); }
+#endif
+
+#if defined(__MMA__)
+typedef vector unsigned char vec_t;
+typedef __vector_quad acc_t;
+#endif
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// VECTORIZED FUSED MULTIPLY ADD
+
+/**
+ * Computes a * b + c.
+ */
+template <typename T, typename U>
+inline U madd(T a, T b, U c) {
+    return add(mul(a, b), c);
+}
+
+#if defined(__FMA__)
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+template <>
+inline __m256 madd(__m256 a, __m256 b, __m256 c) {
+    return _mm256_fmadd_ps(a, b, c);
+}
+#endif
+#if defined(__AVX512F__)
+template <>
+inline __m512 madd(__m512 a, __m512 b, __m512 c) {
+    return _mm512_fmadd_ps(a, b, c);
+}
+#endif
+#if defined(__AVX512BF16__)
+template <>
+inline __m512 madd(__m512bh a, __m512bh b, __m512 c) {
+    return _mm512_dpbf16_ps(c, a, b);
+}
+template <>
+inline __m256 madd(__m256bh a, __m256bh b, __m256 c) {
+    return _mm256_dpbf16_ps(c, a, b);
+}
+#endif
+#endif
+
+#if defined(__ARM_FEATURE_FMA)
+template <>
+inline float32x4_t madd(float32x4_t a, float32x4_t b, float32x4_t c) {
+    return vfmaq_f32(c, b, a);
+}
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER)
+template <>
+inline float16x8_t madd(float16x8_t a, float16x8_t b, float16x8_t c) {
+    return vfmaq_f16(c, b, a);
+}
+#endif
+#endif
+
+#if defined(__VXE__) || defined(__VXE2__)
+template <>
+inline float32x4_t madd(float32x4_t a, float32x4_t b, float32x4_t c) {
+    return vec_madd(a, b, c);
+}
+#endif
+
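The pattern above is worth calling out: the generic `madd` template composes the overloaded `mul` and `add`, and each ISA provides a `template <>` specialization that substitutes a fused multiply-add without changing any call site. A scalar sketch of the same dispatch (editorial illustration, not part of the diff; `madd_sketch` is a hypothetical name):

```cpp
#include <cmath>

// Generic fallback: two separately rounded operations.
template <typename T, typename U>
inline U madd_sketch(T a, T b, U c) {
    return a * b + c;
}

// Specialization: one fused operation with a single rounding, the scalar
// analogue of the vector FMA intrinsics above.
template <>
inline double madd_sketch(double a, double b, double c) {
    return std::fma(a, b, c);
}
```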
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// VECTORIZED HORIZONTAL SUM
+
+#if defined(__ARM_NEON)
+inline float hsum(float32x4_t x) {
+    return vaddvq_f32(x);
+}
+#endif // __ARM_NEON
+
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER)
+inline float hsum(float16x8_t x) {
+    return vaddvq_f32(vaddq_f32(vcvt_f32_f16(vget_low_f16(x)),
+                                vcvt_f32_f16(vget_high_f16(x))));
+}
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#if defined(__VXE__) || defined(__VXE2__)
+inline float hsum(float32x4_t x) {
+    float32x4_t tmp = x + vec_reve(x);
+    return tmp[0] + tmp[1];
+}
+#endif
+
+#if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+inline float hsum(__m128 x) {
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+    x = _mm_add_ps(x, _mm_movehl_ps(x, x));
+    x = _mm_add_ss(x, _mm_movehdup_ps(x));
+#else
+    __m128 t;
+    t = _mm_shuffle_ps(x, x, _MM_SHUFFLE(2, 3, 0, 1));
+    x = _mm_add_ps(x, t);
+    t = _mm_movehl_ps(t, x);
+    x = _mm_add_ss(x, t);
+#endif
+    return _mm_cvtss_f32(x);
+}
+#endif
+
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+inline float hsum(__m256 x) {
+    return hsum(_mm_add_ps(_mm256_extractf128_ps(x, 1),
+                           _mm256_castps256_ps128(x)));
+}
+#endif // __AVX__
+
+#if defined(__AVX512F__)
+inline float hsum(__m512 x) {
+    return _mm512_reduce_add_ps(x);
+}
+#endif // __AVX512F__
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// VECTORIZED MEMORY LOADING
+
+template <typename T, typename U> T load(const U *);
+
+#if defined(__ARM_NEON)
+template <> inline float32x4_t load(const float *p) {
+    return vld1q_f32(p);
+}
+#if !defined(_MSC_VER)
+// FIXME: this should check for __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+template <> inline float16x8_t load(const ggml_fp16_t *p) {
+    return vld1q_f16((const float16_t *)p);
+}
+template <> inline float32x4_t load(const ggml_fp16_t *p) {
+    return vcvt_f32_f16(vld1_f16((const float16_t *)p));
+}
+#endif // _MSC_VER
+#endif // __ARM_NEON
+
+#if defined(__VXE__) || defined(__VXE2__)
+template <> inline float32x4_t load(const ggml_fp16_t * p) {
+    float tmp[4];
+
+    for (int i = 0; i < 4; i++) {
+        tmp[i] = GGML_CPU_FP16_TO_FP32(p[i]);
+    }
+
+    return vec_xl(0, (const float *)(tmp));
+}
+template <> inline float32x4_t load(const float * p) {
+    return vec_xl(0, p);
+}
+#endif
+
+#if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+template <> inline __m128 load(const float *p) {
+    return _mm_loadu_ps(p);
+}
+#endif // __SSE__
+
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)
+template <> inline __m256 load(const float *p) {
+    return _mm256_loadu_ps(p);
+}
+#endif // __AVX__
+
+#if defined(__AVX2__) || defined(__AVX512F__)
+template <> inline __m256 load(const ggml_bf16_t *p) {
+    return _mm256_castsi256_ps(
+        _mm256_slli_epi32(_mm256_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)p)), 16));
+}
+#endif // __AVX2__
+
+#if defined(__F16C__)
+template <> inline __m256 load(const ggml_fp16_t *p) {
+    return _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)p));
+}
+#endif // __F16C__
+
+#if defined(__AVX512F__)
+template <> inline __m512 load(const float *p) {
+    return _mm512_loadu_ps(p);
+}
+template <> inline __m512 load(const ggml_fp16_t *p) {
+    return _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)p));
+}
+template <> inline __m512 load(const ggml_bf16_t *p) {
+    return _mm512_castsi512_ps(
+        _mm512_slli_epi32(_mm512_cvtepu16_epi32(_mm256_loadu_si256((const __m256i *)p)), 16));
+}
+#endif // __AVX512F__
+
+#if defined(__AVX512BF16__)
+template <> inline __m512bh load(const ggml_bf16_t *p) {
+    return (__m512bh)_mm512_loadu_ps((const float *)p);
+}
+template <> inline __m256bh load(const ggml_bf16_t *p) {
+    return (__m256bh)_mm256_loadu_ps((const float *)p);
+}
+template <> inline __m512bh load(const float *p) {
+    return _mm512_cvtne2ps_pbh(_mm512_loadu_ps(p + 16), _mm512_loadu_ps(p));
+}
+template <> inline __m256bh load(const float *p) {
+    return _mm512_cvtneps_pbh(_mm512_loadu_ps(p));
+}
+#endif
+
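The `ggml_bf16_t` loads above rely on a bit-level fact: bfloat16 is exactly the upper 16 bits of an IEEE float32, so zero-extending each 16-bit value and shifting it left by 16 reconstructs the float with no arithmetic. A scalar version of the same trick (editorial sketch, not part of the diff):

```cpp
#include <cstdint>
#include <cstring>

// Scalar analogue of the SIMD bf16 loads: place the bf16 bits in the top
// half of a 32-bit word and reinterpret them as a float.
float bf16_to_f32(uint16_t bf16_bits) {
    uint32_t u = (uint32_t)bf16_bits << 16;  // the same shift done lane-wise above
    float f;
    memcpy(&f, &u, sizeof(f));               // bit-cast without undefined behavior
    return f;
}
```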
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// FLOATING POINT MATRIX MULTIPLICATION
+
+template <int M>
+static inline int64_t BLOCK_SIZE(size_t m) {
+    const int64_t NB_BLOC_M = (m + M - 1) / M;
+    return (m % NB_BLOC_M == 0) ? m / NB_BLOC_M : (m / NB_BLOC_M) + 1;
+}
+
+static constexpr inline int64_t BLOC_POS(int64_t ib, int64_t ibN, int64_t bloc_size) {
+    return ib < ibN ? ib * bloc_size : ibN * bloc_size + (ib - ibN) * (bloc_size - 1);
+}
+
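A worked example of the two helpers above (editorial sketch, not part of the diff): `BLOCK_SIZE<6>(16)` computes `NB_BLOC_M = ceil(16/6) = 3` blocs and, since 16 % 3 != 0, returns bloc size 6; the split is then one bloc of 6 and two shrunk blocs of 5 (6 + 5 + 5 = 16), and `BLOC_POS(ib, ibN, bloc_size)` with `ibN = 1` full-size bloc yields the starting offsets 0, 6, 11.

```cpp
#include <cassert>
#include <cstdint>

// Same definition as BLOC_POS above, renamed for a standalone check.
constexpr int64_t bloc_pos_sketch(int64_t ib, int64_t ibN, int64_t bloc_size) {
    return ib < ibN ? ib * bloc_size : ibN * bloc_size + (ib - ibN) * (bloc_size - 1);
}

int main() {
    assert(bloc_pos_sketch(0, 1, 6) == 0);   // the full-size bloc starts at 0
    assert(bloc_pos_sketch(1, 1, 6) == 6);   // first shrunk bloc starts at 6
    assert(bloc_pos_sketch(2, 1, 6) == 11);  // second shrunk bloc starts at 11
    return 0;
}
```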
+template <int KN, typename D, typename V, typename TA, typename TB, typename TC>
+class tinyBLAS {
+  public:
+    tinyBLAS(const ggml_compute_params * params, int64_t k,
+             const TA *A, int64_t lda,
+             const TB *B, int64_t ldb,
+             TC *C, int64_t ldc)
+        : params(params), A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc) {
+    }
+
+    bool matmul(int64_t m, int64_t n) {
+        if (k % KN != 0)
+            return false;
+        // compute RM so that only tiles of size RM and RM-1 are needed
+#if VECTOR_REGISTERS == 32
+        if (m % 16 == 0 && (m/16 >= params->nth)) {
+            const int64_t SIZE_N = BLOCK_SIZE<6>(n);
+            mnpack<4, 6, 4>(m, n, SIZE_N, 12);
+            return true;
+        }
+        if (m % 8 == 0) {
+            const int64_t SIZE_N = BLOCK_SIZE<6>(n);
+            mnpack<4, 6, 2>(m, n, SIZE_N, 12);
+            return true;
+        }
+        if (m % 4 == 0) {
+            const int64_t SIZE_N = BLOCK_SIZE<6>(n);
+            mnpack<4, 6, 1>(m, n, SIZE_N, 12);
+            return true;
+        }
+#else // VECTOR_REGISTERS == 16
+        if (m % 16 == 0 && (m/16 >= params->nth)) {
+            const int64_t SIZE_N = BLOCK_SIZE<3>(n);
+            mnpack<4, 3, 4>(m, n, SIZE_N, 24);
+            return true;
+        }
+        if (m % 8 == 0) {
+            const int64_t SIZE_N = BLOCK_SIZE<3>(n);
+            mnpack<4, 3, 2>(m, n, SIZE_N, 24);
+            return true;
+        }
+        if (m % 4 == 0) {
+            const int64_t SIZE_N = BLOCK_SIZE<3>(n);
+            mnpack<4, 3, 1>(m, n, SIZE_N, 24);
+            return true;
+        }
+#endif
+        return false;
+    }
+
+  private:
+    template <int RM, int RN, int BM>
+    inline void mnpack(int64_t m, int64_t n, int64_t SIZE_N, int64_t BN) {
+        if (SIZE_N == RN) {
+            return gemm<RM, RN, BM>(m, n, BN);
+        }
+        if constexpr (RN > 1) {
+            return mnpack<RM, RN-1, BM>(m, n, SIZE_N, BN);
+        } else {
+            GGML_LOG_ERROR("mnpack<%d, %d> bloc size not supported\n", RM, (int)SIZE_N);
+            GGML_ASSERT(false); // we have missed something
+        }
+    }
+
+    template <int RM, int RN>
+    inline void gemm_bloc(int64_t ii, int64_t jj) {
+        D Cv[RN][RM] = {};
+        for (int64_t l = 0; l < k; l += KN) {
+            // help the compiler with operation ordering
+            if constexpr (RM <= RN) {
+                V Av[RM];
+                for (int64_t i = 0; i < RM; ++i) {
+                    Av[i] = load<V>(A + lda * (ii + i) + l);
+                }
+                for (int64_t j = 0; j < RN; ++j) {
+                    V Bv = load<V>(B + ldb * (jj + j) + l);
+                    for (int64_t i = 0; i < RM; ++i) {
+                        Cv[j][i] = madd(Av[i], Bv, Cv[j][i]);
+                    }
+                }
+            } else {
+                V Bv[RN];
+                for (int64_t j = 0; j < RN; ++j) {
+                    Bv[j] = load<V>(B + ldb * (jj + j) + l);
+                }
+                for (int64_t i = 0; i < RM; ++i) {
+                    V Av = load<V>(A + lda * (ii + i) + l);
+                    for (int64_t j = 0; j < RN; ++j) {
+                        Cv[j][i] = madd(Av, Bv[j], Cv[j][i]);
+                    }
+                }
+            }
+        }
+        for (int64_t j = 0; j < RN; ++j)
+            for (int64_t i = 0; i < RM; ++i)
+                C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
+    }
+
+    template <int RM, int RN, int BM>
+    NOINLINE void gemm(int64_t m, int64_t n, int64_t BN) {
+        GGML_ASSERT(m % (RM * BM) == 0);
+        const int64_t ytiles = m / (RM * BM);
+        const int64_t xtiles = (n + RN - 1) / RN;
+        const int64_t jj_RN = (xtiles - (xtiles * RN - n));
+
+        // "round" bloc_size to the "nearest" BN
+        const int64_t NB_BN = xtiles < BN ? 1 : (xtiles + BN / 2) / BN;
+        const int64_t SIZE_BN = xtiles % NB_BN == 0 ? xtiles / NB_BN : xtiles / NB_BN + 1;
+        const int64_t jj_BN = (NB_BN - (NB_BN * SIZE_BN - xtiles));
+        const int64_t nb_job = ytiles * NB_BN;
+
+        if (params->ith == 0) {
+            GGML_ASSERT(jj_BN * SIZE_BN + (NB_BN - jj_BN) * (SIZE_BN - 1) == xtiles);
+            // Every thread starts at ith, so the first unprocessed chunk is nth. This saves a bit of coordination right at the start.
+            ggml_threadpool_chunk_set(params->threadpool, params->nth);
+        }
+
+        ggml_barrier(params->threadpool);
+
+        int64_t job = params->ith;
+        while (job < nb_job) {
+            const int64_t ii = (job % ytiles) * RM * BM;
+            const int64_t jb = job / ytiles;
+            const int64_t jr0 = BLOC_POS(jb,   jj_BN, SIZE_BN);
+            const int64_t jrN = BLOC_POS(jb+1, jj_BN, SIZE_BN);
+
+            const int64_t jj0 = BLOC_POS(jr0, jj_RN, RN);
+            const int64_t jj2 = BLOC_POS(jrN, jj_RN, RN);
+            const int64_t jj1 = jj2 < jj_RN * RN ? jj2 : jj_RN * RN;
+
+            for (int64_t bi = 0; bi < BM * RM; bi += RM) {
+                int64_t jj = jj0;
+                for (; jj < jj1; jj += RN) {
+                    gemm_bloc<RM, RN>(ii + bi, jj);
+                }
+                if constexpr (RN > 1) {
+                    for (; jj < jj2; jj += RN - 1) {
+                        gemm_bloc<RM, RN-1>(ii + bi, jj);
+                    }
+                }
+                GGML_ASSERT(jj == jj2);
+            }
+
+            job = ggml_threadpool_chunk_add(params->threadpool, 1);
+        }
+
+        ggml_barrier(params->threadpool);
+        return;
+    }
+
+    const ggml_compute_params * params;
+    const TA *const A;
+    const TB *const B;
+    TC *const C;
+    const int64_t k;
+    const int64_t lda;
+    const int64_t ldb;
+    const int64_t ldc;
+};
+
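The `gemm` above self-schedules: instead of a fixed per-thread slice, every worker claims job indices from a shared counter. A sketch of the same loop with a plain `std::atomic` standing in for `ggml_threadpool_chunk_set`/`ggml_threadpool_chunk_add` (editorial illustration; the assumption that `chunk_add` behaves like `fetch_add` and returns the claimed index is mine, not confirmed by the diff):

```cpp
#include <atomic>
#include <cstdint>

std::atomic<int64_t> next_job;  // stand-in for the threadpool chunk counter

void worker(int ith, int nth, int64_t nb_job) {
    if (ith == 0) next_job.store(nth);  // seed past the first nth jobs; a barrier
                                        // follows in the real code, so no thread
                                        // reads the counter before it is set
    // ... barrier ...
    int64_t job = ith;                  // jobs 0..nth-1 need no coordination
    while (job < nb_job) {
        // process(job);
        job = next_job.fetch_add(1);    // claim the next unprocessed chunk
    }
    // ... barrier ...
}
```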
+//////////////////////////////////////////////////////////////////////////////////////////
+// QUANT ZERO MATRIX MULTIPLICATION
+
+#if defined(__ARM_FEATURE_DOTPROD)
+template <typename TA>
+class tinyBLAS_Q0_ARM {
+  public:
+    tinyBLAS_Q0_ARM(int64_t k,
+                    const TA *A, int64_t lda,
+                    const block_q8_0 *B, int64_t ldb,
+                    float *C, int64_t ldc,
+                    int ith, int nth)
+        : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
+    }
+
+    void matmul(int64_t m, int64_t n) {
+        mnpack(0, m, 0, n);
+    }
+
+  private:
+    NOINLINE void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t mc, nc, mp, np;
+        switch ((MIN(m - m0, 3) << 4) | MIN(n - n0, 3ll)) {
+        case 0x33:
+            mc = 3;
+            nc = 3;
+            gemm<3, 3>(m0, m, n0, n);
+            break;
+        case 0x32:
+            mc = 3;
+            nc = 2;
+            gemm<3, 2>(m0, m, n0, n);
+            break;
+        case 0x23:
+            mc = 2;
+            nc = 3;
+            gemm<2, 3>(m0, m, n0, n);
+            break;
+        case 0x22:
+            mc = 2;
+            nc = 2;
+            gemm<2, 2>(m0, m, n0, n);
+            break;
+        case 0x31:
+            mc = 3;
+            nc = 1;
+            gemm<3, 1>(m0, m, n0, n);
+            break;
+        case 0x13:
+            mc = 1;
+            nc = 3;
+            gemm<1, 3>(m0, m, n0, n);
+            break;
+        case 0x21:
+            mc = 2;
+            nc = 1;
+            gemm<2, 1>(m0, m, n0, n);
+            break;
+        case 0x12:
+            mc = 1;
+            nc = 2;
+            gemm<1, 2>(m0, m, n0, n);
+            break;
+        case 0x11:
+            mc = 1;
+            nc = 1;
+            gemm<1, 1>(m0, m, n0, n);
+            break;
+        default:
+            return;
+        }
+        mp = m0 + (m - m0) / mc * mc;
+        np = n0 + (n - n0) / nc * nc;
+        mnpack(mp, m, n0, np);
+        mnpack(m0, m, np, n);
+    }
+
+    template <int RM, int RN>
+    NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * RN;
+            float32x4_t Cv[RN][RM] = {};
+            for (int64_t l = 0; l < k; ++l)
+                for (int64_t j = 0; j < RN; ++j)
+                    for (int64_t i = 0; i < RM; ++i)
+                        Cv[j][i] = vmlaq_n_f32(Cv[j][i],
+                                               vcvtq_f32_s32(vdotq_s32(
+                                                   vdotq_s32(vdupq_n_s32(0),
+                                                             load_lo(A + lda * (ii + i) + l),
+                                                             load_lo(B + ldb * (jj + j) + l)),
+                                                   load_hi(A + lda * (ii + i) + l),
+                                                   load_hi(B + ldb * (jj + j) + l))),
+                                               unhalf(A[lda * (ii + i) + l].d) *
+                                               unhalf(B[ldb * (jj + j) + l].d));
+            for (int64_t j = 0; j < RN; ++j)
+                for (int64_t i = 0; i < RM; ++i)
+                    C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
+        }
+    }
+
+    inline int8x16_t load_lo(const block_q8_0 *b) {
+        return vld1q_s8(b->qs);
+    }
+
+    inline int8x16_t load_hi(const block_q8_0 *b) {
+        return vld1q_s8(b->qs + 16);
+    }
+
+    inline int8x16_t load_lo(const block_q4_0 *b) {
+        return vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vld1q_u8(b->qs),
+                                                     vdupq_n_u8(0x0f))),
+                        vdupq_n_s8(0x8));
+    }
+
+    inline int8x16_t load_hi(const block_q4_0 *b) {
+        return vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(vld1q_u8(b->qs), 4)),
+                        vdupq_n_s8(0x8));
+    }
+
+    const TA *const A;
+    const block_q8_0 *const B;
+    float *const C;
+    const int64_t k;
+    const int64_t lda;
+    const int64_t ldb;
+    const int64_t ldc;
+    const int ith;
+    const int nth;
+};
+#endif // __ARM_FEATURE_DOTPROD
+
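The `mnpack` switch above uses a compact dispatch encoding: the remaining row and column counts are clamped (to 3 here, to 4 in the AVX class below) and packed into one byte, high nibble = rows, low nibble = columns, so `case 0x32` selects the 3x2 kernel. A standalone sketch of the encoding (editorial, not part of the diff):

```cpp
#include <cstdio>

int main() {
    // Reproduce the case labels: clamp each dimension to 3, pack as (m << 4) | n.
    for (int m = 1; m <= 4; ++m)
        for (int n = 1; n <= 4; ++n) {
            int mc = m < 3 ? m : 3;
            int nc = n < 3 ? n : 3;
            printf("m=%d n=%d -> case 0x%x%x (gemm<%d, %d>)\n", m, n, mc, nc, mc, nc);
        }
    return 0;
}
```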
+#if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__)
+template <typename TA, typename TB, typename TC>
+class tinyBLAS_Q0_AVX {
+  public:
+    tinyBLAS_Q0_AVX(int64_t k,
+                    const TA *A, int64_t lda,
+                    const TB *B, int64_t ldb,
+                    TC *C, int64_t ldc,
+                    int ith, int nth)
+        : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
+        const int8_t kvalues_iq4nl[16] = {
+            -127, -104, -83, -65,
+             -49,  -35, -22, -10,
+               1,   13,  25,  38,
+              53,   69,  89, 113
+        };
+
+        iq4nlt = _mm_loadu_si128((const __m128i *)kvalues_iq4nl);
+    }
+
+    void matmul(int64_t m, int64_t n) {
+        mnpack(0, m, 0, n);
+    }
+
+  private:
+    void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t mc, nc, mp, np;
+        switch ((MIN(m - m0, 4) << 4) | MIN(n - n0, 4)) {
+#if VECTOR_REGISTERS == 32
+        case 0x44:
+            mc = 4;
+            nc = 4;
+#if defined(__AVX2__) && defined(__F16C__)
+            gemm4xN<4>(m0, m, n0, n);
+#else
+            gemm<4, 4>(m0, m, n0, n);
+#endif
+            break;
+        case 0x43:
+            mc = 4;
+            nc = 3;
+#if defined(__AVX2__) && defined(__F16C__)
+            gemm4xN<3>(m0, m, n0, n);
+#else
+            gemm<4, 3>(m0, m, n0, n);
+#endif
+            break;
+        case 0x34:
+            mc = 3;
+            nc = 4;
+#if defined(__AVX2__) && defined(__F16C__)
+            gemmMx4<3>(m0, m, n0, n);
+#else
+            gemm<3, 4>(m0, m, n0, n);
+#endif
+            break;
+        case 0x33:
+            mc = 3;
+            nc = 3;
+            gemm<3, 3>(m0, m, n0, n);
+            break;
+        case 0x42:
+            mc = 4;
+            nc = 2;
+#if defined(__AVX2__) && defined(__F16C__)
+            gemm4xN<2>(m0, m, n0, n);
+#else
+            gemm<4, 2>(m0, m, n0, n);
+#endif
+            break;
+        case 0x24:
+            mc = 2;
+            nc = 4;
+#if defined(__AVX2__) && defined(__F16C__)
+            gemmMx4<2>(m0, m, n0, n);
+#else
+            gemm<2, 4>(m0, m, n0, n);
+#endif
+            break;
+#else
+        case 0x44:
+        case 0x43:
+        case 0x42:
+            mc = 4;
+            nc = 2;
+#if defined(__AVX2__) && defined(__F16C__)
+            gemm4xN<2>(m0, m, n0, n);
+#else
+            gemm<4, 2>(m0, m, n0, n);
+#endif
+            break;
+        case 0x34:
+        case 0x24:
+            mc = 2;
+            nc = 4;
+#if defined(__AVX2__) && defined(__F16C__)
+            gemmMx4<2>(m0, m, n0, n);
+#else
+            gemm<2, 4>(m0, m, n0, n);
+#endif
+            break;
+        case 0x33:
+#endif
+        case 0x32:
+            mc = 3;
+            nc = 2;
+            gemm<3, 2>(m0, m, n0, n);
+            break;
+        case 0x23:
+            mc = 2;
+            nc = 3;
+            gemm<2, 3>(m0, m, n0, n);
+            break;
+        case 0x41:
+            mc = 4;
+            nc = 1;
+#if defined(__AVX2__) && defined(__F16C__)
+            gemm4xN<1>(m0, m, n0, n);
+#else
+            gemm<4, 1>(m0, m, n0, n);
+#endif
+            break;
+        case 0x22:
+            mc = 2;
+            nc = 2;
+            gemm<2, 2>(m0, m, n0, n);
+            break;
+        case 0x14:
+            mc = 1;
+            nc = 4;
+#if defined(__AVX2__) && defined(__F16C__)
+            gemmMx4<1>(m0, m, n0, n);
+#else
+            gemm<1, 4>(m0, m, n0, n);
+#endif
+            break;
+        case 0x31:
+            mc = 3;
+            nc = 1;
+            gemm<3, 1>(m0, m, n0, n);
+            break;
+        case 0x13:
+            mc = 1;
+            nc = 3;
+            gemm<1, 3>(m0, m, n0, n);
+            break;
+        case 0x21:
+            mc = 2;
+            nc = 1;
+            gemm<2, 1>(m0, m, n0, n);
+            break;
+        case 0x12:
+            mc = 1;
+            nc = 2;
+            gemm<1, 2>(m0, m, n0, n);
+            break;
+        case 0x11:
+            mc = 1;
+            nc = 1;
+            gemm<1, 1>(m0, m, n0, n);
+            break;
+        default:
+            return;
+        }
+        mp = m0 + (m - m0) / mc * mc;
+        np = n0 + (n - n0) / nc * nc;
+        mnpack(mp, m, n0, np);
+        mnpack(m0, m, np, n);
+    }
+
+#if defined(__AVX2__) && defined(__F16C__)
+    // Templated functions for gemm of dimensions 4xN
+    template <int RN>
+    NOINLINE void gemm4xN(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t ytiles = (m - m0) / 4;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * 4;
+            int64_t jj = n0 + job % xtiles * RN;
+            __m256 Cv[RN][4] = {};
+            for (int64_t l = 0; l < k; ++l) {
+                uint64_t a_delta = ((uint64_t)A[lda * (ii + 3) + l].d << 48) | ((uint64_t)A[lda * (ii + 2) + l].d << 32) | ((uint64_t)A[lda * (ii + 1) + l].d << 16) | (A[lda * (ii + 0) + l].d);
+                // Convert the delta values for the four blocks to float
+                __m128 da = _mm_cvtph_ps(_mm_set_epi64x(0, a_delta));
+                __m256i avec0 = load(A + lda * (ii + 0) + l);
+                __m256i avec1 = load(A + lda * (ii + 1) + l);
+                __m256i avec2 = load(A + lda * (ii + 2) + l);
+                __m256i avec3 = load(A + lda * (ii + 3) + l);
+                for (int64_t j = 0; j < RN; ++j) {
+                    __m128 db = _mm_set1_ps(unhalf(B[ldb * (jj + j) + l].d));
+                    // Compute the product of the delta values for the four blocks and replicate it across the 256-bit lane
+                    __m256 dvec = _mm256_castps128_ps256(_mm_mul_ps(da, db));
+                    dvec = _mm256_permute2f128_ps(dvec, dvec, 0);
+                    // Compute the dot products and scale them by the matching delta products
+                    Cv[j][0] = madd(_mm256_shuffle_ps(dvec, dvec, 0),
+                                    updot(_mm256_sign_epi8(avec0, avec0),
+                                          _mm256_sign_epi8(load(B + ldb * (jj + j) + l), avec0)),
+                                    Cv[j][0]);
+                    Cv[j][1] = madd(_mm256_shuffle_ps(dvec, dvec, 85),
+                                    updot(_mm256_sign_epi8(avec1, avec1),
+                                          _mm256_sign_epi8(load(B + ldb * (jj + j) + l), avec1)),
+                                    Cv[j][1]);
+                    Cv[j][2] = madd(_mm256_shuffle_ps(dvec, dvec, 170),
+                                    updot(_mm256_sign_epi8(avec2, avec2),
+                                          _mm256_sign_epi8(load(B + ldb * (jj + j) + l), avec2)),
+                                    Cv[j][2]);
+                    Cv[j][3] = madd(_mm256_shuffle_ps(dvec, dvec, 255),
+                                    updot(_mm256_sign_epi8(avec3, avec3),
+                                          _mm256_sign_epi8(load(B + ldb * (jj + j) + l), avec3)),
+                                    Cv[j][3]);
+                }
+            }
+
+            for (int64_t j = 0; j < RN; ++j)
+                for (int64_t i = 0; i < 4; ++i)
+                    C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
+        }
+    }
+
+    // Templated functions for gemm of dimensions Mx4
+    template <int RM>
+    NOINLINE void gemmMx4(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / 4;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * 4;
+            __m256 Cv[4][RM] = {};
+            for (int64_t l = 0; l < k; ++l) {
+                uint64_t b_delta = ((uint64_t)B[ldb * (jj + 3) + l].d << 48) | ((uint64_t)B[ldb * (jj + 2) + l].d << 32) | ((uint64_t)B[ldb * (jj + 1) + l].d << 16) | (B[ldb * (jj + 0) + l].d);
+                // Convert the delta values for the four blocks to float
+                __m128 db = _mm_cvtph_ps(_mm_set_epi64x(0, b_delta));
+                __m256i bvec0 = load(B + ldb * (jj + 0) + l);
+                __m256i bvec1 = load(B + ldb * (jj + 1) + l);
+                __m256i bvec2 = load(B + ldb * (jj + 2) + l);
+                __m256i bvec3 = load(B + ldb * (jj + 3) + l);
+                for (int64_t i = 0; i < RM; ++i) {
+                    __m128 da = _mm_set1_ps(unhalf((A[lda * (ii + i) + l].d)));
+                    // Compute the product of the delta values for the four blocks and replicate it across the 256-bit lane
+                    __m256 dvec = _mm256_castps128_ps256(_mm_mul_ps(da, db));
+                    dvec = _mm256_permute2f128_ps(dvec, dvec, 0);
+                    // Compute the dot products and scale them by the matching delta products
+                    Cv[0][i] = madd(_mm256_shuffle_ps(dvec, dvec, 0),
+                                    updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l),
+                                                           load(A + lda * (ii + i) + l)),
+                                          _mm256_sign_epi8(bvec0, load(A + lda * (ii + i) + l))),
+                                    Cv[0][i]);
+                    Cv[1][i] = madd(_mm256_shuffle_ps(dvec, dvec, 85),
+                                    updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l),
+                                                           load(A + lda * (ii + i) + l)),
+                                          _mm256_sign_epi8(bvec1, load(A + lda * (ii + i) + l))),
+                                    Cv[1][i]);
+                    Cv[2][i] = madd(_mm256_shuffle_ps(dvec, dvec, 170),
+                                    updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l),
+                                                           load(A + lda * (ii + i) + l)),
+                                          _mm256_sign_epi8(bvec2, load(A + lda * (ii + i) + l))),
+                                    Cv[2][i]);
+                    Cv[3][i] = madd(_mm256_shuffle_ps(dvec, dvec, 255),
+                                    updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l),
+                                                           load(A + lda * (ii + i) + l)),
+                                          _mm256_sign_epi8(bvec3, load(A + lda * (ii + i) + l))),
+                                    Cv[3][i]);
+                }
+            }
+            for (int64_t j = 0; j < 4; ++j)
+                for (int64_t i = 0; i < RM; ++i)
+                    C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
+        }
+    }
+#endif
+
+    template <int RM, int RN>
+    NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * RN;
+            __m256 Cv[RN][RM] = {};
+            for (int64_t l = 0; l < k; ++l)
+                for (int64_t j = 0; j < RN; ++j)
+                    for (int64_t i = 0; i < RM; ++i) {
+#if defined(__AVX2__)
+                        __m256 udTmp = updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l),
+                                                              load(A + lda * (ii + i) + l)),
+                                             _mm256_sign_epi8(load(B + ldb * (jj + j) + l),
+                                                              load(A + lda * (ii + i) + l)));
+#else
+                        __m128i ali0 = load0(A + lda * (ii + i) + l);
+                        __m128i ali1 = load1(A + lda * (ii + i) + l);
+                        __m128i blj0 = load0(B + ldb * (jj + j) + l);
+                        __m128i blj1 = load1(B + ldb * (jj + j) + l);
+
+                        __m128i sepAA0 = _mm_sign_epi8(ali0, ali0);
+                        __m128i sepAA1 = _mm_sign_epi8(ali1, ali1);
+                        __m128i sepBA0 = _mm_sign_epi8(blj0, ali0);
+                        __m128i sepBA1 = _mm_sign_epi8(blj1, ali1);
+
+                        // updot
+                        const __m128i oneFill = _mm_set1_epi16(1);
+                        __m128i mad0 = _mm_maddubs_epi16(sepAA0, sepBA0);
+                        __m128i mad1 = _mm_maddubs_epi16(sepAA1, sepBA1);
+                        __m256 udTmp = _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_madd_epi16(oneFill, mad1), _mm_madd_epi16(oneFill, mad0)));
+#endif
+                        Cv[j][i] = madd(_mm256_set1_ps(unhalf(A[lda * (ii + i) + l].d) *
+                                                       unhalf(B[ldb * (jj + j) + l].d)),
+                                        udTmp,
+                                        Cv[j][i]);
+                    }
+            for (int64_t j = 0; j < RN; ++j)
+                for (int64_t i = 0; i < RM; ++i)
+                    C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
+        }
+    }
+
+    inline __m256i load(const block_q8_0 *b) {
+        return _mm256_loadu_si256((const __m256i *)b->qs);
+    }
+
+    inline __m128i load0(const block_q8_0 *b) {
+        return _mm_loadu_si128((const __m128i *)b->qs);
+    }
+
+    inline __m128i load1(const block_q8_0 *b) {
+        return _mm_loadu_si128(((const __m128i *)b->qs) + 1);
+    }
+
+    inline __m256i load(const block_q4_0 *b) {
+        return _mm256_sub_epi8(denibble(b->qs), _mm256_set1_epi8(8));
+    }
+
+    inline __m128i load0(const block_q4_0 *b) {
+        const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs));
+        return _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), x), _mm_set1_epi8(8));
+    }
+
+    inline __m128i load1(const block_q4_0 *b) {
+        const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs));
+        return _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(x, 4)), _mm_set1_epi8(8));
+    }
+
+    inline __m256i load(const block_q5_0 *b) {
+        return _mm256_or_si256(denibble(b->qs), bittobyte(b->qh));
+    }
+
+    inline __m128i load0(const block_q5_0* b) {
+        const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs));
+        uint32_t x32;
+        memcpy(&x32, b->qh, sizeof(uint32_t));
+        __m128i qxl = _mm_and_si128(_mm_set1_epi8(15), x);
+        __m128i bytesl = _mm_cmpeq_epi8(_mm_set1_epi64x(-1),
+                                        _mm_or_si128(_mm_set1_epi64x(0x7fbfdfeff7fbfdfe),
+                                                     _mm_shuffle_epi8(_mm_set1_epi32(x32),
+                                                                      _mm_set_epi64x(0x0101010101010101, 0x0000000000000000))));
+        bytesl = _mm_andnot_si128(bytesl, _mm_set1_epi8((char)0xF0));
+        return _mm_or_si128(qxl, bytesl);
+    }
+
+    inline __m128i load1(const block_q5_0* b) {
+        const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs));
+        uint32_t x32;
+        memcpy(&x32, b->qh, sizeof(uint32_t));
+        __m128i qxh = _mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(x, 4));
+        __m128i bytesh = _mm_cmpeq_epi8(_mm_set1_epi64x(-1),
+                                        _mm_or_si128(_mm_set1_epi64x(0x7fbfdfeff7fbfdfe),
+                                                     _mm_shuffle_epi8(_mm_set1_epi32(x32),
+                                                                      _mm_set_epi64x(0x0303030303030303, 0x0202020202020202))));
+        bytesh = _mm_andnot_si128(bytesh, _mm_set1_epi8((char)0xF0));
+        return _mm_or_si128(qxh, bytesh);
+    }
+
+    inline __m256i load(const block_iq4_nl *b) {
+        return MM256_SET_M128I(load1(b), load0(b));
+    }
+
+    inline __m128i load0(const block_iq4_nl *b) {
+        const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs));
+        return _mm_shuffle_epi8(iq4nlt, _mm_and_si128(_mm_set1_epi8(15), x));
+    }
+
+    inline __m128i load1(const block_iq4_nl *b) {
+        const __m128i x = _mm_loadu_si128((const __m128i *)(b->qs));
+        return _mm_shuffle_epi8(iq4nlt, _mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(x, 4)));
+    }
+
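For reference, a scalar version of what the `block_q4_0` loads above do (editorial sketch, not part of the diff): each byte of `qs` packs two 4-bit weights, and subtracting 8 recenters the unsigned nibble into the signed range [-8, 7].

```cpp
#include <cstdint>

// Scalar equivalent of load0()/load1() for block_q4_0: split each byte into
// its low and high nibble, then recenter by subtracting 8.
void unpack_q4_sketch(const uint8_t *qs, int8_t *lo16, int8_t *hi16) {
    for (int i = 0; i < 16; ++i) {
        lo16[i] = (int8_t)(qs[i] & 0x0f) - 8;  // what load0() produces, lane-wise
        hi16[i] = (int8_t)(qs[i] >> 4)   - 8;  // what load1() produces, lane-wise
    }
}
```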
+    inline __m256 updot(__m256i u, __m256i s) {
+        __m256i res;
+#if defined(__AVX512VNNI__) && defined(__AVX512VL__)
+        res = _mm256_dpbusd_epi32(_mm256_setzero_si256(), u, s);
+#elif defined(__AVXVNNI__)
+        res = _mm256_dpbusd_avx_epi32(_mm256_setzero_si256(), u, s);
+#else
+        res = _mm256_madd_epi16(_mm256_set1_epi16(1), _mm256_maddubs_epi16(u, s));
+#endif
+        return _mm256_cvtepi32_ps(res);
+    }
+
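`updot` and its callers depend on a sign-transfer trick: the underlying maddubs/dpbusd instructions multiply *unsigned* bytes by *signed* bytes, so the callers pass `u = |a|` (via `_mm256_sign_epi8(a, a)`) and `s = sign(a) * b` (via `_mm256_sign_epi8(b, a)`), which leaves the products unchanged. A scalar sketch (editorial, not part of the diff):

```cpp
// sum(u[i] * s[i]) with u = |a| and s = sign(a)*b equals the signed dot a.b.
int dot_via_sign_trick(const signed char *a, const signed char *b, int n) {
    int acc = 0;
    for (int i = 0; i < n; ++i) {
        unsigned char u = (unsigned char)(a[i] < 0 ? -a[i] : a[i]);          // |a|
        signed char   s = (signed char)(a[i] < 0 ? -b[i] : (a[i] ? b[i] : 0)); // sign(a)*b, zeroed where a == 0
        acc += (int)u * (int)s;  // u*s == a*b term by term
    }
    return acc;
}
```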
+    static inline __m256i denibble(const uint8_t *p) {
+        __m128i x = _mm_loadu_si128((const __m128i *)p);
+        return _mm256_and_si256(_mm256_set1_epi8(15),
+                                _mm256_insertf128_si256(_mm256_castsi128_si256(x),
+                                                        _mm_srli_epi16(x, 4), 1));
+    }
+
+    static inline __m256i bittobyte(const uint8_t *p) {
+        uint32_t x32;
+        memcpy(&x32, p, sizeof(uint32_t));
+        __m256i bytes = _mm256_cmpeq_epi8(_mm256_set1_epi64x(-1),
+                                          _mm256_or_si256(_mm256_set1_epi64x(0x7fbfdfeff7fbfdfe),
+                                                          _mm256_shuffle_epi8(_mm256_set1_epi32(x32),
+                                                                              _mm256_set_epi64x(0x0303030303030303, 0x0202020202020202,
+                                                                                                0x0101010101010101, 0x0000000000000000))));
+        return _mm256_andnot_si256(bytes, _mm256_set1_epi8((char)0xF0));
+    }
+
+    const TA *const A;
+    const TB *const B;
+    TC *const C;
+    const int64_t k;
+    const int64_t lda;
+    const int64_t ldb;
+    const int64_t ldc;
+    const int ith;
+    const int nth;
+    __m128i iq4nlt;
+};
+#endif // __AVX__
+
+// PPC implementation
+#if defined(__MMA__)
+
+#define SAVE_ACC(ACC, ii, jj) \
+   __builtin_mma_disassemble_acc(vec_C, ACC); \
+   for (int I = 0; I < 4; I++) { \
+      for (int J = 0; J < 4; J++) { \
+         *((float*)(C+ii+((jj+J)*ldc)+I)) = *((float*)&vec_C[I]+J); \
+      } \
+   } \
+
+template <typename TA, typename TB, typename TC>
+class tinyBLAS_BF16_PPC {
+  public:
+    tinyBLAS_BF16_PPC(int64_t k,
+                      const TA *A, int64_t lda,
+                      const TB *B, int64_t ldb,
+                      TC *C, int64_t ldc,
+                      int ith, int nth)
+        : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
+    }
+
+    void matmul(int64_t m, int64_t n) {
+        mnpack(0, m, 0, n);
+    }
+
+  private:
+    void vector_permute_store(vec_t *c, int numVec, unsigned char *vecOffset) {
+        vec_t t[8], s[8];
+        vec_t swiz1 = {0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23};
+        vec_t swiz2 = {8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31};
+        vec_t swiz3 = {0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23};
+        vec_t swiz4 = {8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31};
+
+        if (numVec == 2) {
+            t[0] = vec_perm(c[0], c[1], swiz1);
+            t[1] = vec_perm(c[2], c[3], swiz1);
+            s[0] = vec_perm(t[0], t[1], swiz3);
+            s[1] = vec_perm(t[0], t[1], swiz4);
+            vec_xst(s[0], 0, (vec_t*)vecOffset);
+            vec_xst(s[1], 0, (vec_t*)(vecOffset + 16));
+        } else if (numVec == 4) {
+            t[0] = vec_perm(c[0], c[1], swiz1);
+            t[1] = vec_perm(c[0], c[1], swiz2);
+            t[2] = vec_perm(c[2], c[3], swiz1);
+            t[3] = vec_perm(c[2], c[3], swiz2);
+            s[0] = vec_perm(t[0], t[2], swiz3);
+            s[1] = vec_perm(t[0], t[2], swiz4);
+            s[2] = vec_perm(t[1], t[3], swiz3);
+            s[3] = vec_perm(t[1], t[3], swiz4);
+            for (int i = 0; i < 4; ++i)
+                vec_xst(s[i], 0, (vec_t*)(vecOffset + i * 16));
+        } else if (numVec == 8) {
+            for (int i = 0; i < 4; i += 2) {
+                t[i+0] = vec_perm(c[i+0], c[i+1], swiz1);
+                t[i+1] = vec_perm(c[i+0], c[i+1], swiz2);
+            }
+            for (int i = 4; i < 8; i += 2) {
+                t[i+0] = vec_perm(c[i+0], c[i+1], swiz1);
+                t[i+1] = vec_perm(c[i+0], c[i+1], swiz2);
+            }
+            s[0] = vec_perm(t[0], t[2], swiz3);
+            s[1] = vec_perm(t[0], t[2], swiz4);
+            s[2] = vec_perm(t[1], t[3], swiz3);
+            s[3] = vec_perm(t[1], t[3], swiz4);
+            s[4] = vec_perm(t[4], t[6], swiz3);
+            s[5] = vec_perm(t[4], t[6], swiz4);
+            s[6] = vec_perm(t[5], t[7], swiz3);
+            s[7] = vec_perm(t[5], t[7], swiz4);
+            for (int i = 0; i < 8; ++i)
+                vec_xst(s[i], 0, (vec_t*)(vecOffset + i * 16));
+        }
+    }
+
+    void packNormal(const TA* a, int64_t lda, int rows, int cols, unsigned char* vec) {
+        int64_t i, j;
+        TA *aoffset = NULL;
+        unsigned char *vecOffset = NULL;
+        TA * aoffsets[8];
+        vector unsigned char c_arr[8];
+        aoffset = const_cast<TA*>(a);
+        vecOffset = vec;
+        j = (rows >> 3);
+        if (j > 0) {
+            do {
+                if (cols == 4) {
+                    aoffsets[0] = aoffset;
+                    for (int it = 1; it < 4; ++it)
+                        aoffsets[it] = aoffsets[it-1] + lda;
+                    aoffset += 4 * lda;
+                    for (int i = 0; i < 4; ++i)
+                        c_arr[i] = vec_xl(0, (vector unsigned char*)aoffsets[i]);
+                    vector_permute_store(c_arr, 4, vecOffset);
+                    for (int i = 0; i < 4; i++)
+                        aoffsets[i] = aoffsets[i] + lda;
+                    vecOffset += 64;
+                }
+                i = (cols >> 3);
+                if (i > 0) {
+                    aoffsets[0] = aoffset;
+                    for (int it = 1; it < 8; ++it) {
+                        aoffsets[it] = aoffsets[it-1] + lda;
+                    }
+                    aoffset += 8 * lda;
+                    do {
+                        for (int it = 0; it < 8; ++it)
+                            c_arr[it] = vec_xl(0, (vector unsigned char*)aoffsets[it]);
+                        vector_permute_store(c_arr, 8, vecOffset);
+                        for (int it = 0; it < 8; ++it)
+                            aoffsets[it] = aoffsets[it] + 8*lda;
+                        vecOffset += 128;
+                        i--;
+                    } while(i > 0);
+                }
+                j--;
+            } while(j > 0);
+        }
+        if (rows & 4) {
+            aoffsets[0] = aoffset;
+            for (int it = 1; it < 4; ++it)
+                aoffsets[it] = aoffsets[it-1] + lda;
+            aoffset += 4 * lda;
+            if (cols == 4) {
+                for (int it = 0; it < 4; ++it)
+                    c_arr[it] = vec_xl(0, (vector unsigned char*)aoffsets[it]);
+                vector_permute_store(c_arr, 2, vecOffset);
+                for (int it = 0; it < 4; it++)
+                    aoffsets[it] = aoffsets[it] + lda;
+                vecOffset += 32;
+            }
+            i = (cols >> 3);
+            if (i > 0) {
+                do {
+                    for (int it = 0; it < 4; ++it)
+                        c_arr[it] = vec_xl(0, (vector unsigned char*)aoffsets[it]);
+                    vector_permute_store(c_arr, 4, vecOffset);
+                    for (int it = 0; it < 4; it++)
+                        aoffsets[it] = aoffsets[it] + 8*lda;
+                    vecOffset += 64;
+                    i--;
+                } while(i > 0);
+            }
+        }
+        if (rows & 3) {
+            aoffsets[0] = aoffset;
+            for (int it = 1; it < 4; ++it)
+                aoffsets[it] = aoffsets[it-1] + lda;
+            if (cols == 4) {
+                switch(rows) {
+                    case 3: c_arr[2] = vec_xl(0, (vector unsigned char*)aoffsets[2]);
+                    case 2: c_arr[1] = vec_xl(0, (vector unsigned char*)aoffsets[1]);
+                    case 1: c_arr[0] = vec_xl(0, (vector unsigned char*)aoffsets[0]);
+                            break;
+                }
+                vector_permute_store(c_arr, 2, vecOffset);
+                for (int it = 0; it < 4; it++)
+                    aoffsets[it] = aoffsets[it] + lda;
+                vecOffset += 32;
+            }
+            i = (cols >> 3);
+            if (i > 0) {
+                do {
+                    switch(rows) {
+                        case 3: c_arr[2] = vec_xl(0, (vector unsigned char*)aoffsets[2]);
+                        case 2: c_arr[1] = vec_xl(0, (vector unsigned char*)aoffsets[1]);
+                        case 1: c_arr[0] = vec_xl(0, (vector unsigned char*)aoffsets[0]);
+                                break;
+                    }
+                    vector_permute_store(c_arr, 4, vecOffset);
+                    for (int it = 0; it < 4; it++)
+                        aoffsets[it] = aoffsets[it] + 8*lda;
+                    vecOffset += 64;
+                    i--;
+                } while(i > 0);
+            }
+        }
+    }
+
+    void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t mc, nc, mp, np;
+        int m_rem = MIN(m - m0, 8);
+        int n_rem = MIN(n - n0, 8);
+
+        if (m_rem >= 8 && n_rem >= 8) {
+            mc = 8;
+            nc = 8;
+            gemm<8,8>(m0, m, n0, n);
+        } else if (m_rem >= 4 && n_rem >= 8) {
+            mc = 4;
+            nc = 8;
+            gemm<4,8>(m0, m, n0, n);
+        } else if (m_rem >= 8 && n_rem >= 4) {
+            mc = 8;
+            nc = 4;
+            gemm<8,4>(m0, m, n0, n);
+        } else if ((m_rem < 4) && (n_rem >= 8)) {
+            nc = 8;
+            switch(m_rem) {
+                case 1:
+                    mc = 1;
+                    gemm_Mx8<1>(m0, m, n0, n);
+                    break;
+                case 2:
+                    mc = 2;
+                    gemm_Mx8<2>(m0, m, n0, n);
+                    break;
+                case 3:
+                    mc = 3;
+                    gemm_Mx8<3>(m0, m, n0, n);
+                    break;
+                default:
+                    return;
+            }
+        } else if (m_rem >= 4 && n_rem >= 4) {
+            mc = 4;
+            nc = 4;
+            gemm_small<4, 4>(m0, m, n0, n);
+        } else if ((m_rem > 4) && (n_rem < 4)) {
+            mc = 4;
+            switch(n_rem) {
+                case 1:
+                    nc = 1;
+                    gemm_small<4, 1>(m0, m, n0, n);
+                    break;
+                case 2:
+                    nc = 2;
+                    gemm_small<4, 2>(m0, m, n0, n);
+                    break;
+                case 3:
+                    nc = 3;
+                    gemm_small<4, 3>(m0, m, n0, n);
+                    break;
+                default:
+                    return;
+            }
+        } else {
+            switch((m_rem << 4) | n_rem) {
+                case 0x43:
+                    mc = 4;
+                    nc = 3;
+                    gemm_small<4, 3>(m0, m, n0, n);
+                    break;
+                case 0x42:
+                    mc = 4;
+                    nc = 2;
+                    gemm_small<4, 2>(m0, m, n0, n);
+                    break;
+                case 0x41:
+                    mc = 4;
+                    nc = 1;
+                    gemm_small<4, 1>(m0, m, n0, n);
+                    break;
+                case 0x34:
+                    mc = 3;
+                    nc = 4;
+                    gemm_small<3, 4>(m0, m, n0, n);
+                    break;
+                case 0x33:
+                    mc = 3;
+                    nc = 3;
+                    gemm_small<3, 3>(m0, m, n0, n);
+                    break;
+                case 0x32:
+                    mc = 3;
+                    nc = 2;
+                    gemm_small<3, 2>(m0, m, n0, n);
+                    break;
+                case 0x31:
+                    mc = 3;
+                    nc = 1;
+                    gemm_small<3, 1>(m0, m, n0, n);
+                    break;
+                case 0x24:
+                    mc = 2;
+                    nc = 4;
+                    gemm_small<2, 4>(m0, m, n0, n);
+                    break;
+                case 0x23:
+                    mc = 2;
+                    nc = 3;
+                    gemm_small<2, 3>(m0, m, n0, n);
+                    break;
+                case 0x22:
+                    mc = 2;
+                    nc = 2;
+                    gemm_small<2, 2>(m0, m, n0, n);
+                    break;
+                case 0x21:
+                    mc = 2;
+                    nc = 1;
+                    gemm_small<2, 1>(m0, m, n0, n);
+                    break;
+                case 0x14:
+                    mc = 1;
+                    nc = 4;
+                    gemm_small<1, 4>(m0, m, n0, n);
+                    break;
+                case 0x13:
+                    mc = 1;
+                    nc = 3;
+                    gemm_small<1, 3>(m0, m, n0, n);
+                    break;
+                case 0x12:
+                    mc = 1;
+                    nc = 2;
+                    gemm_small<1, 2>(m0, m, n0, n);
+                    break;
+                case 0x11:
+                    mc = 1;
+                    nc = 1;
+                    gemm_small<1, 1>(m0, m, n0, n);
+                    break;
+                default:
+                    return;
+            }
+        }
+        mp = m0 + (m - m0) / mc * mc;
+        np = n0 + (n - n0) / nc * nc;
+        mnpack(mp, m, n0, np);
+        mnpack(m0, m, np, n);
+    }
+
+    void KERNEL_4x8(int64_t ii, int64_t jj) {
+        vec_t vec_A[4], vec_B[8], vec_C[4];
+        acc_t acc_0, acc_1;
+        __builtin_mma_xxsetaccz(&acc_0);
+        __builtin_mma_xxsetaccz(&acc_1);
+        for (int l = 0; l < k; l+=8) {
+            packNormal((A+(ii*lda)+l), lda, 4, 8, (uint8_t*)vec_A);
+            packNormal((B+(jj*ldb)+l), ldb, 8, 8, (uint8_t*)vec_B);
+            for (int x = 0; x < 4; x++) {
+                __builtin_mma_xvbf16ger2pp(&acc_0, vec_A[x], vec_B[x]);
+                __builtin_mma_xvbf16ger2pp(&acc_1, vec_A[x], vec_B[x+4]);
+            }
+        }
+        SAVE_ACC(&acc_0, ii, jj);
+        SAVE_ACC(&acc_1, ii, jj+4);
+    }
+
+    void KERNEL_8x4(int64_t ii, int64_t jj) {
+        vec_t vec_A[8], vec_B[4], vec_C[4];
+        acc_t acc_0, acc_1;
+        __builtin_mma_xxsetaccz(&acc_0);
+        __builtin_mma_xxsetaccz(&acc_1);
+        for (int l = 0; l < k; l+=8) {
+            packNormal((A+(ii*lda)+l), lda, 8, 8, (uint8_t*)vec_A);
+            packNormal((B+(jj*ldb)+l), ldb, 8, 4, (uint8_t*)vec_B);
+            for (int x = 0; x < 4; x++) {
+                __builtin_mma_xvbf16ger2pp(&acc_0, vec_A[x], vec_B[x]);
+                __builtin_mma_xvbf16ger2pp(&acc_1, vec_A[x+4], vec_B[x]);
+            }
+        }
+        SAVE_ACC(&acc_0, ii, jj);
+        SAVE_ACC(&acc_1, ii+4, jj);
+    }
+
+    void KERNEL_8x8(int64_t ii, int64_t jj) {
+        vec_t vec_A[8], vec_B[8], vec_C[4];
+        acc_t acc_0, acc_1, acc_2, acc_3;
+        __builtin_mma_xxsetaccz(&acc_0);
+        __builtin_mma_xxsetaccz(&acc_1);
+        __builtin_mma_xxsetaccz(&acc_2);
+        __builtin_mma_xxsetaccz(&acc_3);
+        for (int l = 0; l < k; l+=8) {
+            packNormal(A+(ii*lda)+l, lda, 8, 8, (uint8_t*)vec_A);
+            packNormal(B+(jj*ldb)+l, ldb, 8, 8, (uint8_t*)vec_B);
+            for (int x = 0; x < 4; x++) {
+                __builtin_mma_xvbf16ger2pp(&acc_0, vec_A[x], vec_B[x]);
+                __builtin_mma_xvbf16ger2pp(&acc_1, (vec_t)vec_A[x], (vec_t)vec_B[x+4]);
+                __builtin_mma_xvbf16ger2pp(&acc_2, (vec_t)vec_A[x+4], (vec_t)vec_B[x]);
+                __builtin_mma_xvbf16ger2pp(&acc_3, (vec_t)vec_A[x+4], (vec_t)vec_B[x+4]);
+            }
+        }
+
+        SAVE_ACC(&acc_0, ii, jj);
+        SAVE_ACC(&acc_1, ii, jj+4);
+        SAVE_ACC(&acc_2, ii+4, jj);
+        SAVE_ACC(&acc_3, ii+4, jj+4);
+    }
+
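`KERNEL_8x8` above covers an 8x8 tile of C with four 4x4 MMA accumulators, one per quadrant, each updated every step of the k loop and written back via `SAVE_ACC`. A scalar sketch of that accumulator layout (editorial illustration, not part of the diff; ignores the bf16 packing):

```cpp
// Quadrant (qi, qj) owns rows ii+4*qi.. and columns jj+4*qj.. of C, mirroring
// acc_0..acc_3 in KERNEL_8x8.
void kernel_8x8_sketch(const float *A, const float *B, float *C,
                       long lda, long ldb, long ldc, long k,
                       long ii, long jj) {
    float acc[2][2][4][4] = {};
    for (long l = 0; l < k; ++l)
        for (int qi = 0; qi < 2; ++qi)
            for (int qj = 0; qj < 2; ++qj)
                for (int i = 0; i < 4; ++i)
                    for (int j = 0; j < 4; ++j)
                        acc[qi][qj][i][j] += A[lda * (ii + 4*qi + i) + l]
                                           * B[ldb * (jj + 4*qj + j) + l];
    for (int qi = 0; qi < 2; ++qi)      // the SAVE_ACC write-back step
        for (int qj = 0; qj < 2; ++qj)
            for (int i = 0; i < 4; ++i)
                for (int j = 0; j < 4; ++j)
                    C[ldc * (jj + 4*qj + j) + (ii + 4*qi + i)] = acc[qi][qj][i][j];
}
```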
+    template<int RM, int RN>
+    void gemm_small(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * RN;
+            vec_t vec_C[4];
+            acc_t acc_0;
+            __builtin_mma_xxsetaccz(&acc_0);
+            vec_t vec_A[2], vec_B[2];
+            for (int l=0; l<k; l+=4) {
+                packNormal(A+(ii*lda)+l, lda, RM, 4, (uint8_t*)vec_A);
+                packNormal(B+(jj*ldb)+l, ldb, RN, 4, (uint8_t*)vec_B);
+                for (int x = 0; x<2; x++) {
+                    __builtin_mma_xvbf16ger2pp(&acc_0, vec_A[x], vec_B[x]);
+                }
+            }
+            __builtin_mma_disassemble_acc(vec_C, &acc_0);
+            for (int I = 0; I < RM; I++) {
+                for (int J = 0; J < RN; J++) {
+                    *((TC*)(C+ii+((jj+J)*ldc)+I)) = *((TC*)&vec_C[I]+J);
+                }
+            }
+        }
+    }
+
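`gemm_small`, `gemm_Mx8`, and `gemm` below all share the same static work split: `tiles` output tiles are divided into `duty = ceil(tiles / nth)` contiguous chunks, one per thread index `ith`. A self-contained sketch of that arithmetic, with illustrative values:

```cpp
// Sketch: the duty/start/end partition for 10 tiles over 4 threads.
#include <cstdint>
#include <algorithm>
#include <cstdio>

int main() {
    int64_t tiles = 10, nth = 4;
    int64_t duty = (tiles + nth - 1) / nth;  // ceil(10/4) == 3
    for (int64_t ith = 0; ith < nth; ++ith) {
        int64_t start = duty * ith;
        int64_t end = std::min(start + duty, tiles);
        // threads 0..2 take 3 tiles each, thread 3 takes the final 1
        std::printf("thread %lld -> tiles [%lld, %lld)\n",
                    (long long)ith, (long long)start, (long long)end);
    }
    return 0;
}
```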
+    template<int RM>
+    void gemm_Mx8(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int RN = 8;
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * RN;
+            vec_t vec_C[4];
+            acc_t acc_0, acc_1;
+            __builtin_mma_xxsetaccz(&acc_0);
+            __builtin_mma_xxsetaccz(&acc_1);
+            vec_t vec_A[4], vec_B[8];
+            for (int l=0; l<k; l+=8) {
+                packNormal(A+(ii*lda)+l, lda, RM, 8, (uint8_t*)vec_A);
+                packNormal(B+(jj*ldb)+l, ldb, RN, 8, (uint8_t*)vec_B);
+                for (int x = 0; x<4; x++) {
+                    __builtin_mma_xvbf16ger2pp(&acc_0, vec_A[x], vec_B[x]);
+                    __builtin_mma_xvbf16ger2pp(&acc_1, vec_A[x], vec_B[x+4]);
+                }
+            }
+            __builtin_mma_disassemble_acc(vec_C, &acc_0);
+            for (int I = 0; I < RM; I++) {
+                for (int J = 0; J < 4; J++) {
+                    *((TC*)(C+ii+((jj+J)*ldc)+I)) = *((TC*)&vec_C[I]+J);
+                }
+            }
+            __builtin_mma_disassemble_acc(vec_C, &acc_1);
+            for (int I = 0; I < RM; I++) {
+                for (int J = 0; J < 4; J++) {
+                    *((TC*)(C+ii+((jj+4+J)*ldc)+I)) = *((TC*)&vec_C[I]+J);
+                }
+            }
+        }
+    }
+
+    template<int RM, int RN>
+    inline void kernel(int64_t ii, int64_t jj) {
+        if constexpr(RM == 4 && RN == 8) {
+            KERNEL_4x8(ii,jj);
+        } else if constexpr(RM == 8 && RN == 8) {
+            KERNEL_8x8(ii,jj);
+        } else if constexpr(RM == 8 && RN == 4) {
+            KERNEL_8x4(ii,jj);
+        } else {
+            static_assert(false, "RN/RM values not supported");
+        }
+    }
+
+    template <int RM, int RN>
+    NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * RN;
+            kernel<RM, RN>(ii, jj);
+        }
+    }
+
+    const TA *const A;
+    const TB *const B;
+    TC *C;
+    const int64_t k;
+    const int64_t lda;
+    const int64_t ldb;
+    const int64_t ldc;
+    const int ith;
+    const int nth;
+};
+
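The `kernel<RM, RN>` dispatcher above resolves at compile time, so each `gemm<RM, RN>` instantiation calls its tile kernel directly with no run-time branch. A standalone sketch of the same pattern with hypothetical names; a parameter-dependent condition is the portable pre-C++23 spelling of the failing `static_assert`:

```cpp
// Sketch of compile-time tile dispatch via `if constexpr`.
template <int> inline constexpr bool dependent_false = false;

template <int RM, int RN>
inline void kernel_demo() {
    if constexpr (RM == 4 && RN == 8) {
        // ... 4x8 tile body ...
    } else if constexpr (RM == 8 && RN == 8) {
        // ... 8x8 tile body ...
    } else {
        static_assert(dependent_false<RM>, "tile shape not supported");
    }
}
```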
+template <typename TA, typename TB, typename TC>
+class tinyBLAS_Q0_PPC {
+  public:
+    tinyBLAS_Q0_PPC(int64_t k,
+                const TA *A, int64_t lda,
+                const TB *B, int64_t ldb,
+                TC *C, int64_t ldc,
+                int ith, int nth)
+        : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
+    }
+
+    void matmul(int64_t m, int64_t n) {
+        mnpack(0, m, 0, n);
+    }
+
+  private:
+
+    template<int RM, int RN>
+    inline void save_res(int ii, int jj, int idx, vector float* fin_res) {
+        for (int I = 0; I < RM; I++) {
+            for (int J = 0; J < RN; J++) {
+                *((float*)(C+ii+((jj+J)*ldc)+I)) = *((float*)&fin_res[idx+I]+J);
+            }
+        }
+    }
+
+    template<int size>
+    inline void compute(acc_t* ACC, int c_idx, int s_idx, std::array<int, size>& comparray, vector float* vs, vector float* fin_res) {
+        vector signed int vec_C[4];
+        vector float CA[4] = {0};
+        vector float res[4] = {0};
+        __builtin_mma_disassemble_acc(vec_C, ACC);
+        for (int i = 0; i < 4; i++) {
+            CA[i] = vec_splats((float)(((double)comparray[c_idx+i]) * -128.0));
+            res[i] = vec_add(vec_ctf(vec_C[i], 0), CA[i]);
+            fin_res[s_idx+i] = vec_madd(res[i], vs[s_idx+i], fin_res[s_idx+i]);
+        }
+    }
+
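The `-128.0` factor in `compute` undoes a bias introduced during packing: the B operand is XOR-flipped by 0x80 in `packNormal` below, i.e. every signed byte b is stored as b + 128 so it can feed the unsigned side of `xvi8ger4pp`. With `comparray[i]` holding the matching row sum of A, the correction is

\[
\text{acc}_i = \sum_j a_{ij}\,(b_j + 128) = \sum_j a_{ij} b_j + 128 \sum_j a_{ij}
\quad\Longrightarrow\quad
\sum_j a_{ij} b_j = \text{acc}_i - 128\,\text{comparray}[i],
\]

which is exactly `vec_ctf(vec_C[i], 0)` plus `CA[i]`, then scaled by `vs` (the per-block scale products) via `vec_madd`.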
+    template<typename VA, typename VB, int size>
+    void packNormalInt4(const TA* a, int64_t lda, int rows, int cols, VA* vec, std::array<int, size>& comparray) {
+        int64_t i, j;
+        TA *aoffset = NULL;
+        VA *vecOffset = NULL;
+        TA *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL;
+        TA *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL;
+        VB c1[2] = {0}, c2[2] = {0}, c3[2] = {0}, c4[2] = {0};
+        VB c5[2] = {0}, c6[2] = {0}, c7[2] = {0}, c8[2] = {0};
+        VB t1, t2, t3, t4, t5, t6, t7, t8;
+        const vector signed char lowMask = vec_splats((signed char)0xF);
+        const vector unsigned char v4 = vec_splats((unsigned char)0x4);
+        const vector signed char v8 = vec_splats((signed char)0x8);
+        aoffset = const_cast<TA*>(a);
+        vecOffset = vec;
+        vector unsigned char swiz1 = {0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23};
+        vector unsigned char swiz2 = {8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31};
+        vector unsigned char swiz3 = {0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27};
+        vector unsigned char swiz4 = {4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31};
+        vector signed int vsum = {0};
+        vector signed int vsum2 = {0};
+
+        j = (rows >> 3);
+        if (j > 0) {
+            do {
+                aoffset1 = aoffset;
+                aoffset2 = aoffset1 + lda;
+                aoffset3 = aoffset2 + lda;
+                aoffset4 = aoffset3 + lda;
+                aoffset5 = aoffset4 + lda;
+                aoffset6 = aoffset5 + lda;
+                aoffset7 = aoffset6 + lda;
+                aoffset8 = aoffset7 + lda;
+                aoffset += 8 * lda;
+
+                i = (cols >> 2);
+                if (i > 0) {
+                    do {
+                        c1[1] = reinterpret_cast<VB>(vec_xl(0, aoffset1->qs));
+                        c2[1] = reinterpret_cast<VB>(vec_xl(0, aoffset2->qs));
+                        c3[1] = reinterpret_cast<VB>(vec_xl(0, aoffset3->qs));
+                        c4[1] = reinterpret_cast<VB>(vec_xl(0, aoffset4->qs));
+                        c5[1] = reinterpret_cast<VB>(vec_xl(0, aoffset5->qs));
+                        c6[1] = reinterpret_cast<VB>(vec_xl(0, aoffset6->qs));
+                        c7[1] = reinterpret_cast<VB>(vec_xl(0, aoffset7->qs));
+                        c8[1] = reinterpret_cast<VB>(vec_xl(0, aoffset8->qs));
+
+                        c1[0] = vec_and(c1[1], lowMask);
+                        c1[1] = vec_sr(c1[1], v4);
+                        c1[0] = vec_sub(c1[0], v8);
+                        c1[1] = vec_sub(c1[1], v8);
+                        vsum = vec_sum4s(c1[0], vsum);
+                        vsum2 = vec_sum4s(c1[1], vsum2);
+                        vsum = vec_add(vsum, vsum2);
+                        comparray[0] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                        vsum = vec_splats(0);
+                        vsum2 = vec_splats(0);
+
+                        c2[0] = vec_and(c2[1], lowMask);
+                        c2[1] = vec_sr(c2[1], v4);
+                        c2[0] = vec_sub(c2[0], v8);
+                        c2[1] = vec_sub(c2[1], v8);
+                        vsum = vec_sum4s(c2[0], vsum);
+                        vsum2 = vec_sum4s(c2[1], vsum2);
+                        vsum = vec_add(vsum, vsum2);
+                        comparray[1] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                        vsum = vec_splats(0);
+                        vsum2 = vec_splats(0);
+
+                        c3[0] = vec_and(c3[1], lowMask);
+                        c3[1] = vec_sr(c3[1], v4);
+                        c3[0] = vec_sub(c3[0], v8);
+                        c3[1] = vec_sub(c3[1], v8);
+                        vsum = vec_sum4s(c3[0], vsum);
+                        vsum2 = vec_sum4s(c3[1], vsum2);
+                        vsum = vec_add(vsum, vsum2);
+                        comparray[2] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                        vsum = vec_splats(0);
+                        vsum2 = vec_splats(0);
+
+                        c4[0] = vec_and(c4[1], lowMask);
+                        c4[1] = vec_sr(c4[1], v4);
+                        c4[0] = vec_sub(c4[0], v8);
+                        c4[1] = vec_sub(c4[1], v8);
+                        vsum = vec_sum4s(c4[0], vsum);
+                        vsum2 = vec_sum4s(c4[1], vsum2);
+                        vsum = vec_add(vsum, vsum2);
+                        comparray[3] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                        vsum = vec_splats(0);
+                        vsum2 = vec_splats(0);
+
+                        c5[0] = vec_and(c5[1], lowMask);
+                        c5[1] = vec_sr(c5[1], v4);
+                        c5[0] = vec_sub(c5[0], v8);
+                        c5[1] = vec_sub(c5[1], v8);
+                        vsum = vec_sum4s(c5[0], vsum);
+                        vsum2 = vec_sum4s(c5[1], vsum2);
+                        vsum = vec_add(vsum, vsum2);
+                        comparray[4] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                        vsum = vec_splats(0);
+                        vsum2 = vec_splats(0);
+
+                        c6[0] = vec_and(c6[1], lowMask);
+                        c6[1] = vec_sr(c6[1], v4);
+                        c6[0] = vec_sub(c6[0], v8);
+                        c6[1] = vec_sub(c6[1], v8);
+                        vsum = vec_sum4s(c6[0], vsum);
+                        vsum2 = vec_sum4s(c6[1], vsum2);
+                        vsum = vec_add(vsum, vsum2);
+                        comparray[5] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                        vsum = vec_splats(0);
+                        vsum2 = vec_splats(0);
+
+                        c7[0] = vec_and(c7[1], lowMask);
+                        c7[1] = vec_sr(c7[1], v4);
+                        c7[0] = vec_sub(c7[0], v8);
+                        c7[1] = vec_sub(c7[1], v8);
+                        vsum = vec_sum4s(c7[0], vsum);
+                        vsum2 = vec_sum4s(c7[1], vsum2);
+                        vsum = vec_add(vsum, vsum2);
+                        comparray[6] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                        vsum = vec_splats(0);
+                        vsum2 = vec_splats(0);
+
+                        c8[0] = vec_and(c8[1], lowMask);
+                        c8[1] = vec_sr(c8[1], v4);
+                        c8[0] = vec_sub(c8[0], v8);
+                        c8[1] = vec_sub(c8[1], v8);
+                        vsum = vec_sum4s(c8[0], vsum);
+                        vsum2 = vec_sum4s(c8[1], vsum2);
+                        vsum = vec_add(vsum, vsum2);
+                        comparray[7] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                        vsum = vec_splats(0);
+                        vsum2 = vec_splats(0);
+
+                        t1 = vec_perm(c1[0], c2[0], swiz1);
+                        t2 = vec_perm(c1[0], c2[0], swiz2);
+                        t3 = vec_perm(c3[0], c4[0], swiz1);
+                        t4 = vec_perm(c3[0], c4[0], swiz2);
+                        t5 = vec_perm(t1, t3, swiz3);
+                        t6 = vec_perm(t1, t3, swiz4);
+                        t7 = vec_perm(t2, t4, swiz3);
+                        t8 = vec_perm(t2, t4, swiz4);
+                        vec_xst(t5, 0, vecOffset);
+                        vec_xst(t6, 0, vecOffset+16);
+                        vec_xst(t7, 0, vecOffset+32);
+                        vec_xst(t8, 0, vecOffset+48);
+
+                        t1 = vec_perm(c1[1], c2[1], swiz1);
+                        t2 = vec_perm(c1[1], c2[1], swiz2);
+                        t3 = vec_perm(c3[1], c4[1], swiz1);
+                        t4 = vec_perm(c3[1], c4[1], swiz2);
+                        t5 = vec_perm(t1, t3, swiz3);
+                        t6 = vec_perm(t1, t3, swiz4);
+                        t7 = vec_perm(t2, t4, swiz3);
+                        t8 = vec_perm(t2, t4, swiz4);
+                        vec_xst(t5, 0, vecOffset+64);
+                        vec_xst(t6, 0, vecOffset+80);
+                        vec_xst(t7, 0, vecOffset+96);
+                        vec_xst(t8, 0, vecOffset+112);
+
+                        t1 = vec_perm(c5[0], c6[0], swiz1);
+                        t2 = vec_perm(c5[0], c6[0], swiz2);
+                        t3 = vec_perm(c7[0], c8[0], swiz1);
+                        t4 = vec_perm(c7[0], c8[0], swiz2);
+                        t5 = vec_perm(t1, t3, swiz3);
+                        t6 = vec_perm(t1, t3, swiz4);
+                        t7 = vec_perm(t2, t4, swiz3);
+                        t8 = vec_perm(t2, t4, swiz4);
+                        vec_xst(t5, 0, vecOffset+128);
+                        vec_xst(t6, 0, vecOffset+144);
+                        vec_xst(t7, 0, vecOffset+160);
+                        vec_xst(t8, 0, vecOffset+176);
+
+                        t1 = vec_perm(c5[1], c6[1], swiz1);
+                        t2 = vec_perm(c5[1], c6[1], swiz2);
+                        t3 = vec_perm(c7[1], c8[1], swiz1);
+                        t4 = vec_perm(c7[1], c8[1], swiz2);
+                        t5 = vec_perm(t1, t3, swiz3);
+                        t6 = vec_perm(t1, t3, swiz4);
+                        t7 = vec_perm(t2, t4, swiz3);
+                        t8 = vec_perm(t2, t4, swiz4);
+                        vec_xst(t5, 0, vecOffset+192);
+                        vec_xst(t6, 0, vecOffset+208);
+                        vec_xst(t7, 0, vecOffset+224);
+                        vec_xst(t8, 0, vecOffset+240);
+
+                        aoffset1 += lda;
+                        aoffset2 += lda;
+                        aoffset3 += lda;
+                        aoffset4 += lda;
+                        aoffset5 += lda;
+                        aoffset6 += lda;
+                        aoffset7 += lda;
+                        aoffset8 += lda;
+                        vecOffset += 256;
+                        i--;
+                    } while (i > 0);
+                }
+                j--;
+            } while (j > 0);
+        }
+
+        if (rows & 4) {
+            aoffset1 = aoffset;
+            aoffset2 = aoffset1 + lda;
+            aoffset3 = aoffset2 + lda;
+            aoffset4 = aoffset3 + lda;
+            aoffset += 4 * lda;
+
+            i = (cols >> 2);
+            if (i > 0) {
+                do {
+                    c1[1] = reinterpret_cast<VB>(vec_xl(0, aoffset1->qs));
+                    c2[1] = reinterpret_cast<VB>(vec_xl(0, aoffset2->qs));
+                    c3[1] = reinterpret_cast<VB>(vec_xl(0, aoffset3->qs));
+                    c4[1] = reinterpret_cast<VB>(vec_xl(0, aoffset4->qs));
+
+                    c1[0] = vec_and(c1[1], lowMask);
+                    c1[1] = vec_sr(c1[1], v4);
+                    c1[0] = vec_sub(c1[0], v8);
+                    c1[1] = vec_sub(c1[1], v8);
+                    vsum = vec_sum4s(c1[0], vsum);
+                    vsum2 = vec_sum4s(c1[1], vsum2);
+                    vsum = vec_add(vsum, vsum2);
+                    comparray[0] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                    vsum = vec_splats(0);
+                    vsum2 = vec_splats(0);
+
+                    c2[0] = vec_and(c2[1], lowMask);
+                    c2[1] = vec_sr(c2[1], v4);
+                    c2[0] = vec_sub(c2[0], v8);
+                    c2[1] = vec_sub(c2[1], v8);
+                    vsum = vec_sum4s(c2[0], vsum);
+                    vsum2 = vec_sum4s(c2[1], vsum2);
+                    vsum = vec_add(vsum, vsum2);
+                    comparray[1] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                    vsum = vec_splats(0);
+                    vsum2 = vec_splats(0);
+
+                    c3[0] = vec_and(c3[1], lowMask);
+                    c3[1] = vec_sr(c3[1], v4);
+                    c3[0] = vec_sub(c3[0], v8);
+                    c3[1] = vec_sub(c3[1], v8);
+                    vsum = vec_sum4s(c3[0], vsum);
+                    vsum2 = vec_sum4s(c3[1], vsum2);
+                    vsum = vec_add(vsum, vsum2);
+                    comparray[2] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                    vsum = vec_splats(0);
+                    vsum2 = vec_splats(0);
+
+                    c4[0] = vec_and(c4[1], lowMask);
+                    c4[1] = vec_sr(c4[1], v4);
+                    c4[0] = vec_sub(c4[0], v8);
+                    c4[1] = vec_sub(c4[1], v8);
+                    vsum = vec_sum4s(c4[0], vsum);
+                    vsum2 = vec_sum4s(c4[1], vsum2);
+                    vsum = vec_add(vsum, vsum2);
+                    comparray[3] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                    vsum = vec_splats(0);
+                    vsum2 = vec_splats(0);
+
+                    t1 = vec_perm(c1[0], c2[0], swiz1);
+                    t2 = vec_perm(c1[0], c2[0], swiz2);
+                    t3 = vec_perm(c3[0], c4[0], swiz1);
+                    t4 = vec_perm(c3[0], c4[0], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    vec_xst(t5, 0, vecOffset);
+                    vec_xst(t6, 0, vecOffset+16);
+                    vec_xst(t7, 0, vecOffset+32);
+                    vec_xst(t8, 0, vecOffset+48);
+
+                    t1 = vec_perm(c1[1], c2[1], swiz1);
+                    t2 = vec_perm(c1[1], c2[1], swiz2);
+                    t3 = vec_perm(c3[1], c4[1], swiz1);
+                    t4 = vec_perm(c3[1], c4[1], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    vec_xst(t5, 0, vecOffset+64);
+                    vec_xst(t6, 0, vecOffset+80);
+                    vec_xst(t7, 0, vecOffset+96);
+                    vec_xst(t8, 0, vecOffset+112);
+
+                    aoffset1 += lda;
+                    aoffset2 += lda;
+                    aoffset3 += lda;
+                    aoffset4 += lda;
+                    vecOffset += 128;
+                    i--;
+                } while (i > 0);
+            }
+        }
+
+        if (rows & 3) {
+            aoffset1 = aoffset;
+            aoffset2 = aoffset1 + lda;
+            aoffset3 = aoffset2 + lda;
+            i = (cols >> 2);
+            if (i > 0) {
+                do {
+                    switch(rows) {
+                        case 3: c3[1] = reinterpret_cast<VB>(vec_xl(0, aoffset3->qs));
+                        case 2: c2[1] = reinterpret_cast<VB>(vec_xl(0, aoffset2->qs));
+                        case 1: c1[1] = reinterpret_cast<VB>(vec_xl(0, aoffset1->qs));
+                            break;
+                    }
+                    c1[0] = vec_and(c1[1], lowMask);
+                    c1[1] = vec_sr(c1[1], v4);
+                    c1[0] = vec_sub(c1[0], v8);
+                    c1[1] = vec_sub(c1[1], v8);
+                    vsum = vec_sum4s(c1[0], vsum);
+                    vsum2 = vec_sum4s(c1[1], vsum2);
+                    vsum = vec_add(vsum, vsum2);
+                    comparray[0] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                    vsum = vec_splats(0);
+                    vsum2 = vec_splats(0);
+
+                    c2[0] = vec_and(c2[1], lowMask);
+                    c2[1] = vec_sr(c2[1], v4);
+                    c2[0] = vec_sub(c2[0], v8);
+                    c2[1] = vec_sub(c2[1], v8);
+                    vsum = vec_sum4s(c2[0], vsum);
+                    vsum2 = vec_sum4s(c2[1], vsum2);
+                    vsum = vec_add(vsum, vsum2);
+                    comparray[1] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                    vsum = vec_splats(0);
+                    vsum2 = vec_splats(0);
+
+                    c3[0] = vec_and(c3[1], lowMask);
+                    c3[1] = vec_sr(c3[1], v4);
+                    c3[0] = vec_sub(c3[0], v8);
+                    c3[1] = vec_sub(c3[1], v8);
+                    vsum = vec_sum4s(c3[0], vsum);
+                    vsum2 = vec_sum4s(c3[1], vsum2);
+                    vsum = vec_add(vsum, vsum2);
+                    comparray[2] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                    vsum = vec_splats(0);
+                    vsum2 = vec_splats(0);
+
+                    c4[0] = vec_and(c4[1], lowMask);
+                    c4[1] = vec_sr(c4[1], v4);
+                    c4[0] = vec_sub(c4[0], v8);
+                    c4[1] = vec_sub(c4[1], v8);
+                    vsum = vec_sum4s(c4[0], vsum);
+                    vsum2 = vec_sum4s(c4[1], vsum2);
+                    vsum = vec_add(vsum, vsum2);
+                    comparray[3] = vsum[0] + vsum[1] + vsum[2] + vsum[3];
+                    vsum = vec_splats(0);
+                    vsum2 = vec_splats(0);
+
+                    t1 = vec_perm(c1[0], c2[0], swiz1);
+                    t2 = vec_perm(c1[0], c2[0], swiz2);
+                    t3 = vec_perm(c3[0], c4[0], swiz1);
+                    t4 = vec_perm(c3[0], c4[0], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    vec_xst(t5, 0, vecOffset);
+                    vec_xst(t6, 0, vecOffset+16);
+                    vec_xst(t7, 0, vecOffset+32);
+                    vec_xst(t8, 0, vecOffset+48);
+
+                    t1 = vec_perm(c1[1], c2[1], swiz1);
+                    t2 = vec_perm(c1[1], c2[1], swiz2);
+                    t3 = vec_perm(c3[1], c4[1], swiz1);
+                    t4 = vec_perm(c3[1], c4[1], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    vec_xst(t5, 0, vecOffset+64);
+                    vec_xst(t6, 0, vecOffset+80);
+                    vec_xst(t7, 0, vecOffset+96);
+                    vec_xst(t8, 0, vecOffset+112);
+                    aoffset1 += lda;
+                    aoffset2 += lda;
+                    aoffset3 += lda;
+                    vecOffset += 128;
+                    i--;
+                } while(i > 0);
+            }
+        }
+    }
+
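`packNormalInt4` above is the vector form of standard Q4_0 decoding: each byte of `qs` carries two 4-bit quants biased by +8, and the per-row sums needed by `compute` are gathered on the fly. A scalar sketch of the same per-byte work, with a hypothetical helper over 16 bytes of one block:

```cpp
// Scalar model of the nibble unpacking done with vec_and/vec_sr/vec_sub above.
#include <cstdint>

void unpack_q4_bytes(const uint8_t *qs, int8_t lo[16], int8_t hi[16], int &row_sum) {
    row_sum = 0;
    for (int i = 0; i < 16; ++i) {
        lo[i] = (int8_t)(qs[i] & 0x0F) - 8;  // low nibble:  vec_and(lowMask), vec_sub(v8)
        hi[i] = (int8_t)(qs[i] >> 4) - 8;    // high nibble: vec_sr(v4),       vec_sub(v8)
        row_sum += lo[i] + hi[i];            // what vec_sum4s folds into comparray
    }
}
```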
+    template<typename VA, typename VB>
+    void packNormal(const TB* a, int64_t lda, int rows, int cols, VA* vec, bool flip) {
+        int64_t i, j;
+        TB *aoffset = NULL;
+        VA *vecOffset = NULL;
+        TB *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL;
+        TB *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL;
+        __vector_pair C1, C2, C3, C4, C5, C6, C7, C8;
+        VB c1[2] = {0}, c2[2] = {0}, c3[2] = {0}, c4[2] = {0};
+        VB c5[2] = {0}, c6[2] = {0}, c7[2] = {0}, c8[2] = {0};
+        VB t1, t2, t3, t4, t5, t6, t7, t8;
+        vector unsigned char xor_vector;
+        uint8_t flip_vec = 0x80;
+        xor_vector = vec_splats(flip_vec);
+        vector unsigned char swiz1 = {0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23};
+        vector unsigned char swiz2 = {8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31};
+        vector unsigned char swiz3 = {0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27};
+        vector unsigned char swiz4 = {4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31};
+
+        aoffset = const_cast<TB*>(a);
+        vecOffset = vec;
+        j = (rows >> 3);
+        if (j > 0) {
+            do {
+                aoffset1 = aoffset;
+                aoffset2 = aoffset1 + lda;
+                aoffset3 = aoffset2 + lda;
+                aoffset4 = aoffset3 + lda;
+                aoffset5 = aoffset4 + lda;
+                aoffset6 = aoffset5 + lda;
+                aoffset7 = aoffset6 + lda;
+                aoffset8 = aoffset7 + lda;
+                aoffset += 8 * lda;
+
+                i = (cols >> 3);
+                if (i > 0) {
+                    do {
+                        C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1->qs);
+                        C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2->qs);
+                        C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3->qs);
+                        C4 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset4->qs);
+                        C5 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset5->qs);
+                        C6 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset6->qs);
+                        C7 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset7->qs);
+                        C8 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset8->qs);
+
+                        __builtin_vsx_disassemble_pair(c1, &C1);
+                        __builtin_vsx_disassemble_pair(c2, &C2);
+                        __builtin_vsx_disassemble_pair(c3, &C3);
+                        __builtin_vsx_disassemble_pair(c4, &C4);
+                        __builtin_vsx_disassemble_pair(c5, &C5);
+                        __builtin_vsx_disassemble_pair(c6, &C6);
+                        __builtin_vsx_disassemble_pair(c7, &C7);
+                        __builtin_vsx_disassemble_pair(c8, &C8);
+
+                        t1 = vec_perm(c1[0], c2[0], swiz1);
+                        t2 = vec_perm(c1[0], c2[0], swiz2);
+                        t3 = vec_perm(c3[0], c4[0], swiz1);
+                        t4 = vec_perm(c3[0], c4[0], swiz2);
+                        t5 = vec_perm(t1, t3, swiz3);
+                        t6 = vec_perm(t1, t3, swiz4);
+                        t7 = vec_perm(t2, t4, swiz3);
+                        t8 = vec_perm(t2, t4, swiz4);
+                        if (flip == true) {
+                            t5 = vec_xor(t5, xor_vector);
+                            t6 = vec_xor(t6, xor_vector);
+                            t7 = vec_xor(t7, xor_vector);
+                            t8 = vec_xor(t8, xor_vector);
+                        }
+                        vec_xst(t5, 0, vecOffset);
+                        vec_xst(t6, 0, vecOffset+16);
+                        vec_xst(t7, 0, vecOffset+32);
+                        vec_xst(t8, 0, vecOffset+48);
+
+                        t1 = vec_perm(c1[1], c2[1], swiz1);
+                        t2 = vec_perm(c1[1], c2[1], swiz2);
+                        t3 = vec_perm(c3[1], c4[1], swiz1);
+                        t4 = vec_perm(c3[1], c4[1], swiz2);
+                        t5 = vec_perm(t1, t3, swiz3);
+                        t6 = vec_perm(t1, t3, swiz4);
+                        t7 = vec_perm(t2, t4, swiz3);
+                        t8 = vec_perm(t2, t4, swiz4);
+                        if (flip == true) {
+                            t5 = vec_xor(t5, xor_vector);
+                            t6 = vec_xor(t6, xor_vector);
+                            t7 = vec_xor(t7, xor_vector);
+                            t8 = vec_xor(t8, xor_vector);
+                        }
+                        vec_xst(t5, 0, vecOffset+64);
+                        vec_xst(t6, 0, vecOffset+80);
+                        vec_xst(t7, 0, vecOffset+96);
+                        vec_xst(t8, 0, vecOffset+112);
+
+                        t1 = vec_perm(c5[0], c6[0], swiz1);
+                        t2 = vec_perm(c5[0], c6[0], swiz2);
+                        t3 = vec_perm(c7[0], c8[0], swiz1);
+                        t4 = vec_perm(c7[0], c8[0], swiz2);
+                        t5 = vec_perm(t1, t3, swiz3);
+                        t6 = vec_perm(t1, t3, swiz4);
+                        t7 = vec_perm(t2, t4, swiz3);
+                        t8 = vec_perm(t2, t4, swiz4);
+                        if (flip == true) {
+                            t5 = vec_xor(t5, xor_vector);
+                            t6 = vec_xor(t6, xor_vector);
+                            t7 = vec_xor(t7, xor_vector);
+                            t8 = vec_xor(t8, xor_vector);
+                        }
+                        vec_xst(t5, 0, vecOffset+128);
+                        vec_xst(t6, 0, vecOffset+144);
+                        vec_xst(t7, 0, vecOffset+160);
+                        vec_xst(t8, 0, vecOffset+176);
+
+                        t1 = vec_perm(c5[1], c6[1], swiz1);
+                        t2 = vec_perm(c5[1], c6[1], swiz2);
+                        t3 = vec_perm(c7[1], c8[1], swiz1);
+                        t4 = vec_perm(c7[1], c8[1], swiz2);
+                        t5 = vec_perm(t1, t3, swiz3);
+                        t6 = vec_perm(t1, t3, swiz4);
+                        t7 = vec_perm(t2, t4, swiz3);
+                        t8 = vec_perm(t2, t4, swiz4);
+                        if (flip == true) {
+                            t5 = vec_xor(t5, xor_vector);
+                            t6 = vec_xor(t6, xor_vector);
+                            t7 = vec_xor(t7, xor_vector);
+                            t8 = vec_xor(t8, xor_vector);
+                        }
+                        vec_xst(t5, 0, vecOffset+192);
+                        vec_xst(t6, 0, vecOffset+208);
+                        vec_xst(t7, 0, vecOffset+224);
+                        vec_xst(t8, 0, vecOffset+240);
+
+                        aoffset1 += lda;
+                        aoffset2 += lda;
+                        aoffset3 += lda;
+                        aoffset4 += lda;
+                        aoffset5 += lda;
+                        aoffset6 += lda;
+                        aoffset7 += lda;
+                        aoffset8 += lda;
+                        vecOffset += 256;
+                        i--;
+                    } while(i > 0);
+                }
+                j--;
+            } while(j > 0);
+        }
+
+        if (rows & 4) {
+            aoffset1 = aoffset;
+            aoffset2 = aoffset1 + lda;
+            aoffset3 = aoffset2 + lda;
+            aoffset4 = aoffset3 + lda;
+            aoffset += 4 * lda;
+
+            i = (cols >> 3);
+            if (i > 0) {
+                do {
+                    C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1->qs);
+                    C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2->qs);
+                    C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3->qs);
+                    C4 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset4->qs);
+
+                    __builtin_vsx_disassemble_pair(c1, &C1);
+                    __builtin_vsx_disassemble_pair(c2, &C2);
+                    __builtin_vsx_disassemble_pair(c3, &C3);
+                    __builtin_vsx_disassemble_pair(c4, &C4);
+
+                    t1 = vec_perm(c1[0], c2[0], swiz1);
+                    t2 = vec_perm(c1[0], c2[0], swiz2);
+                    t3 = vec_perm(c3[0], c4[0], swiz1);
+                    t4 = vec_perm(c3[0], c4[0], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                        t5 = vec_xor(t5, xor_vector);
+                        t6 = vec_xor(t6, xor_vector);
+                        t7 = vec_xor(t7, xor_vector);
+                        t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset);
+                    vec_xst(t6, 0, vecOffset+16);
+                    vec_xst(t7, 0, vecOffset+32);
+                    vec_xst(t8, 0, vecOffset+48);
+
+                    t1 = vec_perm(c1[1], c2[1], swiz1);
+                    t2 = vec_perm(c1[1], c2[1], swiz2);
+                    t3 = vec_perm(c3[1], c4[1], swiz1);
+                    t4 = vec_perm(c3[1], c4[1], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                        t5 = vec_xor(t5, xor_vector);
+                        t6 = vec_xor(t6, xor_vector);
+                        t7 = vec_xor(t7, xor_vector);
+                        t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset+64);
+                    vec_xst(t6, 0, vecOffset+80);
+                    vec_xst(t7, 0, vecOffset+96);
+                    vec_xst(t8, 0, vecOffset+112);
+
+                    aoffset1 += lda;
+                    aoffset2 += lda;
+                    aoffset3 += lda;
+                    aoffset4 += lda;
+                    vecOffset += 128;
+                    i--;
+                } while(i > 0);
+            }
+        }
+        if (rows & 3) {
+            aoffset1 = aoffset;
+            aoffset2 = aoffset1 + lda;
+            aoffset3 = aoffset2 + lda;
+            i = (cols >> 3);
+            if (i > 0) {
+                do {
+                    switch(rows) {
+                        case 3: C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3->qs);
+                            __builtin_vsx_disassemble_pair(c3, &C3);
+                        case 2: C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2->qs);
+                            __builtin_vsx_disassemble_pair(c2, &C2);
+                        case 1: C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1->qs);
+                            __builtin_vsx_disassemble_pair(c1, &C1);
+                            break;
+                    }
+                    t1 = vec_perm(c1[0], c2[0], swiz1);
+                    t2 = vec_perm(c1[0], c2[0], swiz2);
+                    t3 = vec_perm(c3[0], c4[0], swiz1);
+                    t4 = vec_perm(c3[0], c4[0], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                        t5 = vec_xor(t5, xor_vector);
+                        t6 = vec_xor(t6, xor_vector);
+                        t7 = vec_xor(t7, xor_vector);
+                        t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset);
+                    vec_xst(t6, 0, vecOffset+16);
+                    vec_xst(t7, 0, vecOffset+32);
+                    vec_xst(t8, 0, vecOffset+48);
+
+                    t1 = vec_perm(c1[1], c2[1], swiz1);
+                    t2 = vec_perm(c1[1], c2[1], swiz2);
+                    t3 = vec_perm(c3[1], c4[1], swiz1);
+                    t4 = vec_perm(c3[1], c4[1], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                        t5 = vec_xor(t5, xor_vector);
+                        t6 = vec_xor(t6, xor_vector);
+                        t7 = vec_xor(t7, xor_vector);
+                        t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset+64);
+                    vec_xst(t6, 0, vecOffset+80);
+                    vec_xst(t7, 0, vecOffset+96);
+                    vec_xst(t8, 0, vecOffset+112);
+
+                    aoffset1 += lda;
+                    aoffset2 += lda;
+                    aoffset3 += lda;
+                    vecOffset += 128;
+                    i--;
+                } while(i > 0);
+            }
+        }
+    }
+
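The `flip` path in `packNormal` XORs each byte with 0x80. For two's-complement int8 that is the same as adding 128, which converts the signed q8_0 quants into the unsigned operand layout the MMA accumulate consumes; the +128 bias is later removed in `compute` (see the note above). A tiny check of the identity:

```cpp
// XOR with 0x80 == add 128 for two's-complement int8.
#include <cstdint>
#include <cassert>

int main() {
    for (int x = -128; x <= 127; ++x) {
        uint8_t flipped = (uint8_t)(int8_t)x ^ 0x80u;
        assert(flipped == (uint8_t)(x + 128));
    }
    return 0;
}
```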
+    void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t mc, nc, mp, np;
+        int m_rem = MIN(m - m0, 8);
+        int n_rem = MIN(n - n0, 8);
+        // TO-DO: KERNEL_16x8 and KERNEL_8x16 are having some performance
+        // issues. After resolving them, below code will be enabled.
+        /*if (m_rem >= 16 && n_rem >= 8) {
+            mc = 16;
+            nc = 8;
+            gemm<16,8>(m0, m, n0, n);
+        } else if(m_rem >= 8 && n_rem >= 16) {
+            mc = 8;
+            nc = 16;
+            gemm<8,16>(m0, m, n0, n);
+        }*/
+        if (m_rem >= 8 && n_rem >= 8) {
+            mc = 8;
+            nc = 8;
+            gemm<8,8>(m0, m, n0, n);
+        } else if (m_rem >= 4 && n_rem >= 8) {
+            mc = 4;
+            nc = 8;
+            gemm<4,8>(m0, m, n0, n);
+        } else if (m_rem >= 8 && n_rem >= 4) {
+            mc = 8;
+            nc = 4;
+            gemm<8,4>(m0, m, n0, n);
+        } else if (m_rem >= 4 && n_rem >= 4) {
+            mc = 4;
+            nc = 4;
+            gemm_small<4, 4>(m0, m, n0, n);
+        } else if ((m_rem < 4) && (n_rem > 4)) {
+            nc = 4;
+            switch(m_rem) {
+                case 1:
+                    mc = 1;
+                    gemm_small<1, 4>(m0, m, n0, n);
+                    break;
+                case 2:
+                    mc = 2;
+                    gemm_small<2, 4>(m0, m, n0, n);
+                    break;
+                case 3:
+                    mc = 3;
+                    gemm_small<3, 4>(m0, m, n0, n);
+                    break;
+                default:
+                    return;
+            }
+        } else if ((m_rem > 4) && (n_rem < 4)) {
+            mc = 4;
+            switch(n_rem) {
+                case 1:
+                    nc = 1;
+                    gemm_small<4, 1>(m0, m, n0, n);
+                    break;
+                case 2:
+                    nc = 2;
+                    gemm_small<4, 2>(m0, m, n0, n);
+                    break;
+                case 3:
+                    nc = 3;
+                    gemm_small<4, 3>(m0, m, n0, n);
+                    break;
+                default:
+                    return;
+            }
+        } else {
+            switch((m_rem << 4) | n_rem) {
+                case 0x43:
+                    mc = 4;
+                    nc = 3;
+                    gemm_small<4, 3>(m0, m, n0, n);
+                    break;
+                case 0x42:
+                    mc = 4;
+                    nc = 2;
+                    gemm_small<4, 2>(m0, m, n0, n);
+                    break;
+                case 0x41:
+                    mc = 4;
+                    nc = 1;
+                    gemm_small<4, 1>(m0, m, n0, n);
+                    break;
+                case 0x34:
+                    mc = 3;
+                    nc = 4;
+                    gemm_small<3, 4>(m0, m, n0, n);
+                    break;
+                case 0x33:
+                    mc = 3;
+                    nc = 3;
+                    gemm_small<3, 3>(m0, m, n0, n);
+                    break;
+                case 0x32:
+                    mc = 3;
+                    nc = 2;
+                    gemm_small<3, 2>(m0, m, n0, n);
+                    break;
+                case 0x31:
+                    mc = 3;
+                    nc = 1;
+                    gemm_small<3, 1>(m0, m, n0, n);
+                    break;
+                case 0x24:
+                    mc = 2;
+                    nc = 4;
+                    gemm_small<2, 4>(m0, m, n0, n);
+                    break;
+                case 0x23:
+                    mc = 2;
+                    nc = 3;
+                    gemm_small<2, 3>(m0, m, n0, n);
+                    break;
+                case 0x22:
+                    mc = 2;
+                    nc = 2;
+                    gemm_small<2, 2>(m0, m, n0, n);
+                    break;
+                case 0x21:
+                    mc = 2;
+                    nc = 1;
+                    gemm_small<2, 1>(m0, m, n0, n);
+                    break;
+                case 0x14:
+                    mc = 1;
+                    nc = 4;
+                    gemm_small<1, 4>(m0, m, n0, n);
+                    break;
+                case 0x13:
+                    mc = 1;
+                    nc = 3;
+                    gemm_small<1, 3>(m0, m, n0, n);
+                    break;
+                case 0x12:
+                    mc = 1;
+                    nc = 2;
+                    gemm_small<1, 2>(m0, m, n0, n);
+                    break;
+                case 0x11:
+                    mc = 1;
+                    nc = 1;
+                    gemm_small<1, 1>(m0, m, n0, n);
+                    break;
+                default:
+                    return;
+            }
+        }
+        mp = m0 + (m - m0) / mc * mc;
+        np = n0 + (n - n0) / nc * nc;
+        mnpack(mp, m, n0, np);
+        mnpack(m0, m, np, n);
+    }
+
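In the final `else` of `mnpack`, both remainders are at most 4, so they are packed into a single switch key: the high nibble is the row remainder and the low nibble is the column remainder (case 0x34 is 3 rows by 4 columns). A one-line illustration:

```cpp
// The switch key packs (m_rem, n_rem) into one byte: 0x34 == (3 << 4) | 4.
constexpr int encode(int m_rem, int n_rem) { return (m_rem << 4) | n_rem; }
static_assert(encode(3, 4) == 0x34 && encode(1, 1) == 0x11, "nibble encoding");
```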
+    void KERNEL_4x8(int64_t ii, int64_t jj) {
+        vec_t vec_A[8], vec_B[16] = {0};
+        acc_t acc_0, acc_1;
+        std::array<int, 4> comparray {};
+        vector float fin_res[8] = {0};
+        vector float vs[8] = {0};
+        bool isAblock_q4 = std::is_same_v<TA, block_q4_0>;
+        for (int l = 0; l < k; l++) {
+            __builtin_mma_xxsetaccz(&acc_0);
+            __builtin_mma_xxsetaccz(&acc_1);
+            if (std::is_same_v<TA, block_q4_0>) {
+                packNormalInt4<int8_t, vector signed char, 4>((A+(ii*lda)+l), lda, 4, 4, (int8_t*)vec_A, comparray);
+            } else {
+                packNormal<int8_t, vector signed char>((const TB*)(A+(ii*lda)+l), lda, 4, 8, (int8_t*)vec_A, false);
+            }
+            packNormal<uint8_t, vector unsigned char>((B+(jj*ldb)+l), ldb, 8, 8, (uint8_t*)vec_B, true);
+            for(int x = 0; x < 8; x++) {
+                __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_1, vec_A[x], vec_B[x+8]);
+            }
+            for (int I = 0; I<4; I++) {
+                for (int J = 0; J<4; J++) {
+                    *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d));
+                    *((float*)&vs[I+4]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J+4)*ldb)+l)->d));
+                }
+            }
+            if (!isAblock_q4) {
+                auto aoffset = A+(ii*lda)+l;
+                for (int i = 0; i < 4; i++) {
+                    comparray[i] = 0;
+                    int ca = 0;
+                    auto *at = aoffset->qs;
+                    for (int j = 0; j < 32; j++)
+                        ca += (int)*at++;
+                    comparray[i] = ca;
+                    aoffset += lda;
+                }
+            }
+            compute<4>(&acc_0, 0, 0, comparray, vs, fin_res);
+            compute<4>(&acc_1, 0, 4, comparray, vs, fin_res);
+        }
+        save_res<4, 4>(ii, jj, 0, fin_res);
+        save_res<4, 4>(ii, jj+4, 4, fin_res);
+    }
+
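In `KERNEL_4x8` above, `vs` stages the per-block scale products d_A * d_B (read through `unhalf` from each block's fp16 `d` field), so after the integer accumulation each 4x4 quadrant is folded into fp32 as the usual quantized dot product

\[
C_{ij} \mathrel{+}= d_{A,i}\, d_{B,j} \sum_{t=0}^{31} q_{A,i,t}\; q_{B,j,t},
\]

with the sum supplied by the `xvi8ger4pp` accumulator (after the -128 bias correction) and the scale product supplied by `vs`.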
+    void KERNEL_8x4(int64_t ii, int64_t jj) {
+        vec_t vec_A[16], vec_B[8] = {0};
+        acc_t acc_0, acc_1;
+        std::array<int, 8> comparray {};
+        vector float fin_res[8] = {0};
+        vector float vs[8] = {0};
+        bool isAblock_q4 = std::is_same_v<TA, block_q4_0>;
+        for (int l = 0; l < k; l++) {
+            __builtin_mma_xxsetaccz(&acc_0);
+            __builtin_mma_xxsetaccz(&acc_1);
+            if (std::is_same_v<TA, block_q4_0>) {
+                packNormalInt4<int8_t, vector signed char, 8>((A+(ii*lda)+l), lda, 8, 4, (int8_t*)vec_A, comparray);
+            } else {
+                packNormal<int8_t, vector signed char>((const TB*)(A+(ii*lda)+l), lda, 8, 8, (int8_t*)vec_A, false);
+            }
+            packNormal<uint8_t, vector unsigned char>((B+(jj*ldb)+l), ldb, 4, 8, (uint8_t*)vec_B, true);
+            for(int x = 0; x < 8; x++) {
+                __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_1, vec_A[x+8], vec_B[x]);
+            }
+            for (int I = 0; I<8; I++) {
+                for (int J = 0; J<4; J++) {
+                    *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d));
+                }
+            }
+            if (!isAblock_q4) {
+                auto aoffset = A+(ii*lda)+l;
+                for (int i = 0; i < 8; i++) {
+                    comparray[i] = 0;
+                    int ca = 0;
+                    auto *at = aoffset->qs;
+                    for (int j = 0; j < 32; j++)
+                        ca += (int)*at++;
+                    comparray[i] = ca;
+                    aoffset += lda;
+                }
+            }
+            compute<8>(&acc_0, 0, 0, comparray, vs, fin_res);
+            compute<8>(&acc_1, 4, 4, comparray, vs, fin_res);
+        }
+        save_res<4, 4>(ii, jj, 0, fin_res);
+        save_res<4, 4>(ii+4, jj, 4, fin_res);
+    }
+
+    void KERNEL_8x8(int64_t ii, int64_t jj) {
+        vec_t vec_A[16], vec_B[16] = {0};
+        acc_t acc_0, acc_1, acc_2, acc_3;
+        std::array<int, 8> comparray {};
+        vector float fin_res[16] = {0};
+        vector float vs[16] = {0};
+        bool isAblock_q4 = std::is_same_v<TA, block_q4_0>;
+        for (int l = 0; l < k; l++) {
+            __builtin_mma_xxsetaccz(&acc_0);
+            __builtin_mma_xxsetaccz(&acc_1);
+            __builtin_mma_xxsetaccz(&acc_2);
+            __builtin_mma_xxsetaccz(&acc_3);
+            if (std::is_same_v<TA, block_q4_0>) {
+                packNormalInt4<int8_t, vector signed char, 8>((A+(ii*lda)+l), lda, 8, 4, (int8_t*)vec_A, comparray);
+            } else {
+                packNormal<int8_t, vector signed char>((const TB*)(A+(ii*lda)+l), lda, 8, 8, (int8_t*)vec_A, false);
+            }
+            packNormal<uint8_t, vector unsigned char>((B+(jj*ldb)+l), ldb, 8, 8, (uint8_t*)vec_B, true);
+            for(int x = 0; x < 8; x++) {
+                __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_1, vec_A[x+8], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_2, vec_A[x], vec_B[x+8]);
+                __builtin_mma_xvi8ger4pp(&acc_3, vec_A[x+8], vec_B[x+8]);
+            }
+            for (int I = 0; I<8; I++) {
+                for (int J = 0; J<4; J++) {
+                    *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d));
+                    *((float*)&vs[I+8]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J+4)*ldb)+l)->d));
+                }
+            }
+            if (!isAblock_q4) {
+                auto aoffset = A+(ii*lda)+l;
+                for (int i = 0; i < 8; i++) {
+                    comparray[i] = 0;
+                    int ca = 0;
+                    auto *at = aoffset->qs;
+                    for (int j = 0; j < 32; j++)
+                        ca += (int)*at++;
+                    comparray[i] = ca;
+                    aoffset += lda;
+                }
+            }
+            compute<8>(&acc_0, 0, 0, comparray, vs, fin_res);
+            compute<8>(&acc_1, 4, 4, comparray, vs, fin_res);
+            compute<8>(&acc_2, 0, 8, comparray, vs, fin_res);
+            compute<8>(&acc_3, 4, 12, comparray, vs, fin_res);
+        }
+        save_res<4, 4>(ii, jj, 0, fin_res);
+        save_res<4, 4>(ii+4, jj, 4, fin_res);
+        save_res<4, 4>(ii, jj+4, 8, fin_res);
+        save_res<4, 4>(ii+4, jj+4, 12, fin_res);
+    }
+
+    template<int RM, int RN>
+    void gemm_small(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        vec_t vec_A[8] = {0}, vec_B[8] = {0};
+        vector signed int vec_C[4];
+        acc_t acc_0;
+        bool isAblock_q4 = std::is_same_v<TA, block_q4_0>;
+
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * RN;
+            std::array<int, 4> comparray{};
+            vector float res[4] = {0};
+            vector float fin_res[4] = {0};
+            vector float vs[4] = {0};
+            vector float CA[4] = {0};
+            __builtin_prefetch((A+(ii*lda)+0)->qs, 0, 1); // prefetch first value
+            __builtin_prefetch((B+(jj*ldb)+0)->qs, 0, 1); // prefetch first value
+            for (int l = 0; l < k; l++) {
+                __builtin_prefetch((A+(ii*lda)+(l+1))->qs, 0, 1); // prefetch one loop ahead
+                __builtin_prefetch((B+(jj*ldb)+(l+1))->qs, 0, 1); // prefetch one loop ahead
+                __builtin_mma_xxsetaccz(&acc_0);
+                if (isAblock_q4) {
+                    packNormalInt4<int8_t, vector signed char, 4>((A+(ii*lda)+l), lda, RM, 4, (int8_t*)vec_A, comparray);
+                } else {
+                    packNormal<int8_t, vector signed char>((const TB*)(A+(ii*lda)+l), lda, RM, 8, (int8_t*)vec_A, false);
+                }
+                packNormal<uint8_t, vector unsigned char>((B+(jj*ldb)+l), ldb, RN, 8, (uint8_t*)vec_B, true);
+                for(int x = 0; x < 8; x+=4) {
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x+1], vec_B[x+1]);
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x+2], vec_B[x+2]);
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x+3], vec_B[x+3]);
+                }
+                for (int I = 0; I<RM; I++) {
+                    for (int J = 0; J<RN; J++) {
+                        *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d));
+                    }
+                }
+                __builtin_mma_disassemble_acc(vec_C, &acc_0);
+                if (!isAblock_q4) {
+                    auto aoffset = A+(ii*lda)+l;
+                    for (int i = 0; i < RM; i++) {
+                        comparray[i] = 0;
+                        int ca = 0;
+                        auto *at = aoffset->qs;
+                        for (int j = 0; j < 32; j++)
+                            ca += (int)*at++;
+                        comparray[i] = ca;
+                        aoffset += lda;
+                    }
+                }
+                for (int i = 0; i < RM; i++) {
+                    CA[i] = vec_splats((float)(((double)comparray[i]) * -128.0));
+                    res[i] = vec_add(vec_ctf(vec_C[i], 0), CA[i]);
+                    fin_res[i] = vec_madd(res[i], vs[i], fin_res[i]);
+                }
+            }
+            save_res<RM, RN>(ii, jj, 0, fin_res);
+        }
+    }
+
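`gemm_small` above issues `__builtin_prefetch(ptr, 0, 1)` one block ahead of each load (second argument 0 means read, third argument 1 means low temporal locality). The idiom in isolation, with a hypothetical `consume`; prefetching one element past the end is harmless, since prefetches never fault:

```cpp
// Sketch of the prefetch-one-iteration-ahead pattern used in gemm_small.
void consume(const float *block);

void walk(const float *base, int nblocks, int stride) {
    __builtin_prefetch(base, 0, 1);  // warm the first block
    for (int l = 0; l < nblocks; ++l) {
        __builtin_prefetch(base + (l + 1) * stride, 0, 1);  // next block, read, low reuse
        consume(base + l * stride);
    }
}
```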
+    template<int RM, int RN>
+    inline void kernel(int64_t ii, int64_t jj) {
+        if constexpr(RM == 4 && RN == 8) {
+            KERNEL_4x8(ii,jj);
+        } else if constexpr(RM == 8 && RN == 4) {
+            KERNEL_8x4(ii,jj);
+        } else if constexpr(RM == 8 && RN == 8) {
+            KERNEL_8x8(ii,jj);
+        } else {
+            static_assert(false, "RN/RM values not supported");
+        }
+    }
+
+    template <int RM, int RN>
+    NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * RN;
+            kernel<RM, RN>(ii, jj);
+        }
+    }
+
+    const TA *const A;
+    const TB *const B;
+    TC *C;
+    TA *At;
+    TB *Bt;
+    const int64_t k;
+    const int64_t lda;
+    const int64_t ldb;
+    const int64_t ldc;
+    const int ith;
+    const int nth;
+};
+
+template <typename TA, typename TB, typename TC>
+class tinyBLAS_PPC {
+  public:
+    tinyBLAS_PPC(int64_t k,
+                const TA *A, int64_t lda,
+                const TB *B, int64_t ldb,
+                TC *C, int64_t ldc,
+                int ith, int nth)
+        : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
+    }
+
+    void matmul(int64_t m, int64_t n) {
+        mnpack(0, m, 0, n);
+    }
+
+  private:
+
+    void (tinyBLAS_PPC::*kernel)(int64_t, int64_t);
+
+    template<typename VA>
+    void packTranspose(const TA* a, int64_t lda, int rows, int cols, TA* vec) {
+        int64_t i, j;
+        TA *aoffset = NULL, *boffset = NULL;
+        TA *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL;
+        TA *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL;
+        __vector_pair C1, C2, C3, C4, C5, C6, C7, C8;
+        VA c1[2] = {0}, c2[2] = {0}, c3[2] = {0}, c4[2] = {0};
+        VA c5[2] = {0}, c6[2] = {0}, c7[2] = {0}, c8[2] = {0};
+        VA t1, t2, t3, t4, t5, t6, t7, t8;
+        aoffset = const_cast<TA*>(a);
+        boffset = vec;
+        j = (rows >> 3);
+        if (j > 0) {
+
+            do {
+                aoffset1 = aoffset;
+                aoffset2 = aoffset1 + lda;
+                aoffset3 = aoffset2 + lda;
+                aoffset4 = aoffset3 + lda;
+                aoffset5 = aoffset4 + lda;
+                aoffset6 = aoffset5 + lda;
+                aoffset7 = aoffset6 + lda;
+                aoffset8 = aoffset7 + lda;
+                aoffset += 8 * lda;
+                i = (cols >> 3);
+                if (i > 0) {
+                    do {
+                        C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1);
+                        C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2);
+                        C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3);
+                        C4 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset4);
+                        C5 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset5);
+                        C6 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset6);
+                        C7 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset7);
+                        C8 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset8);
+                        __builtin_vsx_disassemble_pair(c1, &C1);
+                        __builtin_vsx_disassemble_pair(c2, &C2);
+                        __builtin_vsx_disassemble_pair(c3, &C3);
+                        __builtin_vsx_disassemble_pair(c4, &C4);
+                        __builtin_vsx_disassemble_pair(c5, &C5);
+                        __builtin_vsx_disassemble_pair(c6, &C6);
+                        __builtin_vsx_disassemble_pair(c7, &C7);
+                        __builtin_vsx_disassemble_pair(c8, &C8);
+
+                        t1 = vec_mergeh(c1[0], c2[0]);
+                        t2 = vec_mergeh(c3[0], c4[0]);
+                        t3 = vec_mergeh(c5[0], c6[0]);
+                        t4 = vec_mergeh(c7[0], c8[0]);
+                        t5 = vec_xxpermdi(t1, t2, 0);
+                        t6 = vec_xxpermdi(t3, t4, 0);
+                        t7 = vec_xxpermdi(t1, t2, 3);
+                        t8 = vec_xxpermdi(t3, t4, 3);
+                        vec_xst(t5, 0, boffset);
+                        vec_xst(t6, 0, boffset+4);
+                        vec_xst(t7, 0, boffset+8);
+                        vec_xst(t8, 0, boffset+12);
+
+                        t1 = vec_mergel(c1[0], c2[0]);
+                        t2 = vec_mergel(c3[0], c4[0]);
+                        t3 = vec_mergel(c5[0], c6[0]);
+                        t4 = vec_mergel(c7[0], c8[0]);
+                        t5 = vec_xxpermdi(t1, t2, 0);
+                        t6 = vec_xxpermdi(t3, t4, 0);
+                        t7 = vec_xxpermdi(t1, t2, 3);
+                        t8 = vec_xxpermdi(t3, t4, 3);
+                        vec_xst(t5, 0, boffset+16);
+                        vec_xst(t6, 0, boffset+20);
+                        vec_xst(t7, 0, boffset+24);
+                        vec_xst(t8, 0, boffset+28);
+
+                        t1 = vec_mergeh(c1[1], c2[1]);
+                        t2 = vec_mergeh(c3[1], c4[1]);
+                        t3 = vec_mergeh(c5[1], c6[1]);
+                        t4 = vec_mergeh(c7[1], c8[1]);
+                        t5 = vec_xxpermdi(t1, t2, 0);
+                        t6 = vec_xxpermdi(t3, t4, 0);
+                        t7 = vec_xxpermdi(t1, t2, 3);
+                        t8 = vec_xxpermdi(t3, t4, 3);
+                        vec_xst(t5, 0, boffset+32);
+                        vec_xst(t6, 0, boffset+36);
+                        vec_xst(t7, 0, boffset+40);
+                        vec_xst(t8, 0, boffset+44);
+
+                        t1 = vec_mergel(c1[1], c2[1]);
+                        t2 = vec_mergel(c3[1], c4[1]);
+                        t3 = vec_mergel(c5[1], c6[1]);
+                        t4 = vec_mergel(c7[1], c8[1]);
+                        t5 = vec_xxpermdi(t1, t2, 0);
+                        t6 = vec_xxpermdi(t3, t4, 0);
+                        t7 = vec_xxpermdi(t1, t2, 3);
+                        t8 = vec_xxpermdi(t3, t4, 3);
+                        vec_xst(t5, 0, boffset+48);
+                        vec_xst(t6, 0, boffset+52);
+                        vec_xst(t7, 0, boffset+56);
+                        vec_xst(t8, 0, boffset+60);
+
+                        aoffset1 += 8*lda;
+                        aoffset2 += 8*lda;
+                        aoffset3 += 8*lda;
+                        aoffset4 += 8*lda;
+                        boffset += 64;
+                        i--;
+                    } while(i > 0);
+                }
+                if (cols & 4) {
+                    c1[0] = vec_xl(0, aoffset1);
+                    c2[0] = vec_xl(0, aoffset2);
+                    c3[0] = vec_xl(0, aoffset3);
+                    c4[0] = vec_xl(0, aoffset4);
+                    c5[0] = vec_xl(0, aoffset5);
+                    c6[0] = vec_xl(0, aoffset6);
+                    c7[0] = vec_xl(0, aoffset7);
+                    c8[0] = vec_xl(0, aoffset8);
+
+                    t1 = vec_mergeh(c1[0], c2[0]);
+                    t2 = vec_mergeh(c3[0], c4[0]);
+                    t3 = vec_mergeh(c5[0], c6[0]);
+                    t4 = vec_mergeh(c7[0], c8[0]);
+                    t5 = vec_xxpermdi(t1, t2, 0);
+                    t6 = vec_xxpermdi(t3, t4, 0);
+                    t7 = vec_xxpermdi(t1, t2, 3);
+                    t8 = vec_xxpermdi(t3, t4, 3);
+                    vec_xst(t5, 0, boffset);
+                    vec_xst(t6, 0, boffset+4);
+                    vec_xst(t7, 0, boffset+8);
+                    vec_xst(t8, 0, boffset+12);
+
+                    t1 = vec_mergel(c1[0], c2[0]);
+                    t2 = vec_mergel(c3[0], c4[0]);
+                    t3 = vec_mergel(c5[0], c6[0]);
+                    t4 = vec_mergel(c7[0], c8[0]);
+                    t5 = vec_xxpermdi(t1, t2, 0);
+                    t6 = vec_xxpermdi(t3, t4, 0);
+                    t7 = vec_xxpermdi(t1, t2, 3);
+                    t8 = vec_xxpermdi(t3, t4, 3);
+                    vec_xst(t5, 0, boffset+16);
+                    vec_xst(t6, 0, boffset+20);
+                    vec_xst(t7, 0, boffset+24);
+                    vec_xst(t8, 0, boffset+28);
+                }
+                j--;
+            } while(j > 0);
+        }
+
+        if (rows & 4) {
+            aoffset1 = aoffset;
+            aoffset2 = aoffset1 + lda;
+            aoffset3 = aoffset2 + lda;
+            aoffset4 = aoffset3 + lda;
+            aoffset += 4 * lda;
+            i = (cols >> 3);
+            if (i > 0) {
+                do {
+                    C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1);
+                    C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2);
+                    C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3);
+                    C4 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset4);
+                    __builtin_vsx_disassemble_pair(c1, &C1);
+                    __builtin_vsx_disassemble_pair(c2, &C2);
+                    __builtin_vsx_disassemble_pair(c3, &C3);
+                    __builtin_vsx_disassemble_pair(c4, &C4);
+
+                    t1 = vec_mergeh(c1[0], c2[0]);
+                    t2 = vec_mergeh(c3[0], c4[0]);
+                    t3 = vec_mergel(c1[0], c2[0]);
+                    t4 = vec_mergel(c3[0], c4[0]);
+                    t5 = vec_xxpermdi(t1, t2, 0);
+                    t6 = vec_xxpermdi(t1, t2, 3);
+                    t7 = vec_xxpermdi(t3, t4, 0);
+                    t8 = vec_xxpermdi(t3, t4, 3);
+                    vec_xst(t5, 0, boffset);
+                    vec_xst(t6, 0, boffset+4);
+                    vec_xst(t7, 0, boffset+8);
+                    vec_xst(t8, 0, boffset+12);
+
+                    t1 = vec_mergeh(c1[1], c2[1]);
+                    t2 = vec_mergeh(c3[1], c4[1]);
+                    t3 = vec_mergel(c1[1], c2[1]);
+                    t4 = vec_mergel(c3[1], c4[1]);
+                    t5 = vec_xxpermdi(t1, t2, 0);
+                    t6 = vec_xxpermdi(t1, t2, 3);
+                    t7 = vec_xxpermdi(t3, t4, 0);
+                    t8 = vec_xxpermdi(t3, t4, 3);
+                    vec_xst(t5, 0, boffset+16);
+                    vec_xst(t6, 0, boffset+20);
+                    vec_xst(t7, 0, boffset+24);
+                    vec_xst(t8, 0, boffset+28);
+
+                    aoffset1 += 8*lda;
+                    aoffset2 += 8*lda;
+                    aoffset3 += 8*lda;
+                    aoffset4 += 8*lda;
+                    boffset += 32;
+                    i--;
+                } while(i > 0);
+            }
+
+            if (cols & 4) {
+                c1[0] = vec_xl(0, aoffset1);
+                c2[0] = vec_xl(0, aoffset2);
+                c3[0] = vec_xl(0, aoffset3);
+                c4[0] = vec_xl(0, aoffset4);
+
+                t1 = vec_mergeh(c1[0], c2[0]);
+                t2 = vec_mergeh(c3[0], c4[0]);
+                t3 = vec_xxpermdi(t1, t2, 0);
+                t4 = vec_xxpermdi(t1, t2, 3);
+                vec_xst(t3, 0, boffset);
+                vec_xst(t4, 0, boffset+4);
+
+                t1 = vec_mergel(c1[0], c2[0]);
+                t2 = vec_mergel(c3[0], c4[0]);
+                t3 = vec_xxpermdi(t1, t2, 0);
+                t4 = vec_xxpermdi(t1, t2, 3);
+                vec_xst(t3, 0, boffset+8);
+                vec_xst(t4, 0, boffset+12);
+            }
+        }
+        if (rows & 3) {
+            aoffset1 = aoffset;
+            aoffset2 = aoffset1 + lda;
+            aoffset3 = aoffset2 + lda;
+            if (cols & 4) {
+                c1[0] = vec_xl(0, aoffset1);
+                c2[0] = vec_xl(0, aoffset2);
+                c3[0] = vec_xl(0, aoffset3);
+
+                t1 = vec_mergeh(c1[0], c2[0]);
+                t2 = vec_mergeh(c3[0], c4[0]);
+                t3 = vec_xxpermdi(t1, t2, 0);
+                t4 = vec_xxpermdi(t1, t2, 3);
+                vec_xst(t3, 0, boffset);
+                vec_xst(t4, 0, boffset+4);
+
+                t1 = vec_mergel(c1[0], c2[0]);
+                t2 = vec_mergel(c3[0], c4[0]);
+                t3 = vec_xxpermdi(t1, t2, 0);
+                t4 = vec_xxpermdi(t1, t2, 3);
+                vec_xst(t3, 0, boffset+8);
+                vec_xst(t4, 0, boffset+12);
+            }
+        }
+    }
+
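`packTranspose` above is a register-level transpose: `vec_mergeh`/`vec_mergel` interleave pairs of rows and `vec_xxpermdi` selects doubleword halves, leaving 4x4 float tiles transposed so the MMA kernels can consume columns of A and rows of B contiguously. The net effect on one tile, in scalar form:

```cpp
// What the merge/permdi sequences compute for each 4x4 float tile.
void transpose4x4(const float in[4][4], float out[4][4]) {
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            out[c][r] = in[r][c];
}
```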
+    void KERNEL_4x4(int64_t ii, int64_t jj) {
+        vec_t vec_A[4], vec_B[4], vec_C[4];
+        acc_t acc_0;
+        __builtin_mma_xxsetaccz(&acc_0);
+        for (int l = 0; l < k; l+=4) {
+            packTranspose<vector float>(A+(ii*lda)+l, lda, 4, 4, (TA*)vec_A);
+            packTranspose<vector float>(B+(jj*ldb)+l, ldb, 4, 4, (TA*)vec_B);
+            __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], vec_B[0]);
+            __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], vec_B[1]);
+            __builtin_mma_xvf32gerpp(&acc_0, vec_A[2], vec_B[2]);
+            __builtin_mma_xvf32gerpp(&acc_0, vec_A[3], vec_B[3]);
+        }
+        SAVE_ACC(&acc_0, ii, jj);
+    }
+
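`KERNEL_4x4` is the simplest expression of the fp32 path: each `__builtin_mma_xvf32gerpp(&acc, va, vb)` adds the outer product of two length-4 float vectors into the 4x4 accumulator, so over the k-loop the tile accumulates

\[
\mathrm{acc} \mathrel{+}= a\, b^{\top}, \qquad
C_{4\times4} = \sum_{l=0}^{k-1} A_{[ii..ii+3],\,l}\; B_{l,\,[jj..jj+3]},
\]

which `SAVE_ACC` then writes back in one shot.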
+    void KERNEL_4x8(int64_t ii, int64_t jj) {
+        vec_t vec_A[4], vec_B[8], vec_C[4];
+        acc_t acc_0, acc_1;
+        __builtin_mma_xxsetaccz(&acc_0);
+        __builtin_mma_xxsetaccz(&acc_1);
+        for (int64_t l = 0; l < k; l+=4) {
+            packTranspose<vector float>(A+(ii*lda)+l, lda, 4, 4, (TA*)vec_A);
+            packTranspose<vector float>(B+(jj*ldb)+l, ldb, 8, 4, (TA*)vec_B);
+            __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], (vec_t)vec_B[0]);
+            __builtin_mma_xvf32gerpp(&acc_1, vec_A[0], (vec_t)vec_B[1]);
+            __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], (vec_t)vec_B[2]);
+            __builtin_mma_xvf32gerpp(&acc_1, vec_A[1], (vec_t)vec_B[3]);
+            __builtin_mma_xvf32gerpp(&acc_0, vec_A[2], (vec_t)vec_B[4]);
+            __builtin_mma_xvf32gerpp(&acc_1, vec_A[2], (vec_t)vec_B[5]);
+            __builtin_mma_xvf32gerpp(&acc_0, vec_A[3], (vec_t)vec_B[6]);
+            __builtin_mma_xvf32gerpp(&acc_1, vec_A[3], (vec_t)vec_B[7]);
+        }
+        SAVE_ACC(&acc_0, ii, jj);
+        SAVE_ACC(&acc_1, ii, jj+4);
+    }
+
|
2991
|
+
void KERNEL_8x4(int64_t ii, int64_t jj) {
|
2992
|
+
vec_t vec_A[8], vec_B[4], vec_C[4];
|
2993
|
+
acc_t acc_0, acc_1;
|
2994
|
+
__builtin_mma_xxsetaccz(&acc_0);
|
2995
|
+
__builtin_mma_xxsetaccz(&acc_1);
|
2996
|
+
for (int64_t l = 0; l < k; l+=4) {
|
2997
|
+
packTranspose<vector float>(A+(ii*lda)+l, lda, 8, 4, (TA*)vec_A);
|
2998
|
+
packTranspose<vector float>(B+(jj*ldb)+l, ldb, 4, 4, (TA*)vec_B);
|
2999
|
+
__builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[0], vec_B[0]);
|
3000
|
+
__builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[1], vec_B[0]);
|
3001
|
+
__builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[2], vec_B[1]);
|
3002
|
+
__builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[3], vec_B[1]);
|
3003
|
+
__builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[4], vec_B[2]);
|
3004
|
+
__builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[5], vec_B[2]);
|
3005
|
+
__builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[6], vec_B[3]);
|
3006
|
+
__builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[7], vec_B[3]);
|
3007
|
+
}
|
3008
|
+
SAVE_ACC(&acc_0, ii, jj);
|
3009
|
+
SAVE_ACC(&acc_1, ii+4, jj);
|
3010
|
+
}
|
3011
|
+
|
3012
|
+
void KERNEL_8x8(int64_t ii, int64_t jj) {
|
3013
|
+
vec_t vec_A[16], vec_B[16], vec_C[4];
|
3014
|
+
acc_t acc_0, acc_1, acc_2, acc_3;
|
3015
|
+
__builtin_mma_xxsetaccz(&acc_0);
|
3016
|
+
__builtin_mma_xxsetaccz(&acc_1);
|
3017
|
+
__builtin_mma_xxsetaccz(&acc_2);
|
3018
|
+
__builtin_mma_xxsetaccz(&acc_3);
|
3019
|
+
for (int l = 0; l < k; l+=8) {
|
3020
|
+
packTranspose<vector float>(A+(ii*lda)+l, lda, 8, 8, (TA*)vec_A);
|
3021
|
+
packTranspose<vector float>(B+(jj*ldb)+l, ldb, 8, 8, (TA*)vec_B);
|
3022
|
+
for(int x = 0; x < 16; x+=2) {
|
3023
|
+
__builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[x], vec_B[x]);
|
3024
|
+
__builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[x], vec_B[x+1]);
|
3025
|
+
__builtin_mma_xvf32gerpp(&acc_2, (vec_t)vec_A[x+1], vec_B[x]);
|
3026
|
+
__builtin_mma_xvf32gerpp(&acc_3, (vec_t)vec_A[x+1], vec_B[x+1]);
|
3027
|
+
}
|
3028
|
+
}
|
3029
|
+
SAVE_ACC(&acc_0, ii, jj);
|
3030
|
+
SAVE_ACC(&acc_1, ii, jj+4);
|
3031
|
+
SAVE_ACC(&acc_2, ii+4, jj);
|
3032
|
+
SAVE_ACC(&acc_3, ii+4, jj+4);
|
3033
|
+
}
|
3034
|
+
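KERNEL_8x8 covers an 8x8 output tile with four independent 4x4 accumulators, which keeps more MMA work in flight per pair of packed loads. The quadrant each accumulator lands in follows directly from the SAVE_ACC calls above; a small sketch of that mapping (hypothetical helper name, for illustration only):

    #include <cstdint>

    // Which 4x4 quadrant of the 8x8 tile at (ii, jj) accumulator `which`
    // (0..3 for acc_0..acc_3) is stored to, per the SAVE_ACC calls:
    // acc_0 -> (ii, jj), acc_1 -> (ii, jj+4), acc_2 -> (ii+4, jj),
    // acc_3 -> (ii+4, jj+4).
    struct Quad { int64_t row, col; };
    static Quad quadrant_origin(int which, int64_t ii, int64_t jj) {
        return { ii + (which / 2) * 4, jj + (which % 2) * 4 };
    }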
+    void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t mc, nc, mp, np;
+        int m_rem = MIN(m - m0, 16);
+        int n_rem = MIN(n - n0, 16);
+        if (m_rem >= 16 && n_rem >= 8) {
+            mc = 8;
+            nc = 8;
+            gemm<8,8>(m0, m, n0, n);
+        } else if(m_rem >= 8 && n_rem >= 16) {
+            mc = 8;
+            nc = 8;
+            gemm<8,8>(m0, m, n0, n);
+        } else if (m_rem >= 8 && n_rem >= 8) {
+            mc = 8;
+            nc = 8;
+            gemm<8,8>(m0, m, n0, n);
+        } else if (m_rem >= 4 && n_rem >= 8) {
+            mc = 4;
+            nc = 8;
+            gemm<4,8>(m0, m, n0, n);
+        } else if (m_rem >= 8 && n_rem >= 4) {
+            mc = 8;
+            nc = 4;
+            gemm<8,4>(m0, m, n0, n);
+        } else if (m_rem >= 4 && n_rem >= 4) {
+            mc = 4;
+            nc = 4;
+            gemm<4,4>(m0, m, n0, n);
+        } else if ((m_rem < 4) && (n_rem > 4)) {
+            nc = 4;
+            switch(m_rem) {
+                case 1:
+                    mc = 1;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 2:
+                    mc = 2;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 3:
+                    mc = 3;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                default:
+                    return;
+            }
+        } else if ((m_rem > 4) && (n_rem < 4)) {
+            mc = 4;
+            switch(n_rem) {
+                case 1:
+                    nc = 1;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 2:
+                    nc = 2;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 3:
+                    nc = 3;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                default:
+                    return;
+            }
+        } else {
+            switch((m_rem << 4) | n_rem) {
+                case 0x43:
+                    mc = 4;
+                    nc = 3;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 0x42:
+                    mc = 4;
+                    nc = 2;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 0x41:
+                    mc = 4;
+                    nc = 1;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 0x34:
+                    mc = 3;
+                    nc = 4;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 0x33:
+                    mc = 3;
+                    nc = 3;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 0x32:
+                    mc = 3;
+                    nc = 2;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 0x31:
+                    mc = 3;
+                    nc = 1;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 0x24:
+                    mc = 2;
+                    nc = 4;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 0x23:
+                    mc = 2;
+                    nc = 3;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 0x22:
+                    mc = 2;
+                    nc = 2;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 0x21:
+                    mc = 2;
+                    nc = 1;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 0x14:
+                    mc = 1;
+                    nc = 4;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 0x13:
+                    mc = 1;
+                    nc = 3;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 0x12:
+                    mc = 1;
+                    nc = 2;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                case 0x11:
+                    mc = 1;
+                    nc = 1;
+                    gemm_small(m0, m, n0, n, mc, nc);
+                    break;
+                default:
+                    return;
+            }
+        }
+        mp = m0 + (m - m0) / mc * mc;
+        np = n0 + (n - n0) / nc * nc;
+        mnpack(mp, m, n0, np);
+        mnpack(m0, m, np, n);
+    }
+
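mnpack picks the largest kernel tile that fits the remaining submatrix, then recurses on the two rectangular leftovers: rows mp..m against columns n0..np, and all rows against columns np..n. The final switch packs both remainders into one byte, high nibble = m_rem and low nibble = n_rem, so case 0x43 means a 4x3 leftover tile. A sketch of that encoding (hypothetical helper, illustration only):

    // Decode the (m_rem << 4) | n_rem key used by mnpack's last switch.
    static void decode_rem_key(int m_rem, int n_rem, int *mc, int *nc) {
        int key = (m_rem << 4) | n_rem;  // e.g. m_rem=4, n_rem=3 -> 0x43
        *mc = (key >> 4) & 0xF;          // rows of the small tile
        *nc = key & 0xF;                 // cols of the small tile
    }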
+    void gemm_small(int64_t m0, int64_t m, int64_t n0, int64_t n, int RM, int RN) {
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * RN;
+            vec_t vec_C[4];
+            acc_t acc_0;
+            __builtin_mma_xxsetaccz(&acc_0);
+            vec_t vec_A[4] = {0}, vec_B[4] = {0};
+            for (int l=0; l<k; l+=4) {
+                /* The 'GEMV forwarding' concept is used in the first two
+                 * conditional branches: when one of the matrices has a single
+                 * row/column, its elements are broadcast instead of being
+                 * prepacked with the packing routine.
+                 */
+                if (RM == 1) {
+                    TA* a = const_cast<TA*>(A+(ii)*lda+l);
+                    packTranspose<vector float>(B+(jj*ldb)+l, ldb, RN, 4, (TA*)vec_B);
+                    vec_A[0] = (vec_t)vec_xl(0,a);
+                    vec_A[1] = (vec_t)vec_splats(*((TA*)&vec_A+1));
+                    vec_A[2] = (vec_t)vec_splats(*((TA*)&vec_A+2));
+                    vec_A[3] = (vec_t)vec_splats(*((TA*)&vec_A+3));
+                } else if (RN == 1) {
+                    packTranspose<vector float>(A+(ii*lda)+l, lda, RM, 4, (TA*)vec_A);
+                    TB* b = const_cast<TB*>(B+(jj)*ldb+l);
+                    vec_B[0] = (vec_t)vec_xl(0,b);
+                    vec_B[1] = (vec_t)vec_splats(*((TB*)&vec_B+1));
+                    vec_B[2] = (vec_t)vec_splats(*((TB*)&vec_B+2));
+                    vec_B[3] = (vec_t)vec_splats(*((TB*)&vec_B+3));
+                } else {
+                    packTranspose<vector float>(A+(ii*lda)+l, lda, RM, 4, (TA*)vec_A);
+                    packTranspose<vector float>(B+(jj*ldb)+l, ldb, RN, 4, (TA*)vec_B);
+                }
+                __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], vec_B[0]);
+                __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], vec_B[1]);
+                __builtin_mma_xvf32gerpp(&acc_0, vec_A[2], vec_B[2]);
+                __builtin_mma_xvf32gerpp(&acc_0, vec_A[3], vec_B[3]);
+            }
+            __builtin_mma_disassemble_acc(vec_C, &acc_0);
+            for (int I = 0; I < RM; I++) {
+                for (int J = 0; J < RN; J++) {
+                    *((TC*)(C+ii+((jj+J)*ldc)+I)) = *((TC*)&vec_C[I]+J);
+                }
+            }
+        }
+    }
+
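In the RM == 1 'GEMV forwarding' branch, only the first output row of the accumulator is ultimately stored, so the single A row can be loaded once and broadcast rather than prepacked. A scalar model of what that branch computes per 4-deep k-step (hypothetical helper, not the MMA code):

    // With a single A row, the 4x4 outer-product updates reduce to one dot
    // product per output column: C[0][j] += sum_l A[0][l] * B[l][j].
    static void gemv_row_step(const float a[4], const float b[4][4] /* packed B */,
                              float c[4] /* 1xRN output row */) {
        for (int j = 0; j < 4; ++j)
            for (int l = 0; l < 4; ++l)
                c[j] += a[l] * b[l][j];
    }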
+    template <int RM, int RN>
+    NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        if (RM == 4 && RN == 4) {
+            kernel = &tinyBLAS_PPC::KERNEL_4x4;
+        } else if (RM == 4 && RN == 8) {
+            kernel = &tinyBLAS_PPC::KERNEL_4x8;
+        } else if (RM == 8 && RN == 4) {
+            kernel = &tinyBLAS_PPC::KERNEL_8x4;
+        } else if (RM == 8 && RN == 8) {
+            kernel = &tinyBLAS_PPC::KERNEL_8x8;
+        }
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * RN;
+            (this->*kernel)(ii, jj);
+        }
+    }
+
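Both gemm() and gemm_small() share the same tile scheduling: tiles are numbered row-major over the ytiles x xtiles grid and dealt out in equal 'duty'-sized contiguous ranges, one per thread. A standalone sketch of that partitioning (illustration only):

    #include <cstdint>

    // Compute the [start, end) tile range owned by thread `ith` of `nth`.
    static void tile_range(int64_t tiles, int ith, int nth,
                           int64_t *start, int64_t *end) {
        int64_t duty = (tiles + nth - 1) / nth;  // ceil(tiles / nth)
        *start = duty * ith;
        *end   = *start + duty;
        if (*end > tiles)
            *end = tiles;                        // last thread may get fewer tiles
    }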
+    const TA *const A;
+    const TB *const B;
+    TC *C;
+    TA *At;
+    TB *Bt;
+    const int64_t k;
+    const int64_t lda;
+    const int64_t ldb;
+    const int64_t ldc;
+    const int ith;
+    const int nth;
+};
+#endif
+} // namespace
+
+/**
+ * Performs optimized matrix multiplication on CPU.
+ *
+ * This subroutine may compute C = Aᵀ * B with column major ordering.
+ * Despite its name, this isn't a generalized implementation. Work is
+ * only performed when a handwritten kernel is available for the input
+ * types and the target ISA. Otherwise the caller should fall back to
+ * a general matmul routine.
+ *
+ * For example, for single-threaded single-precision GEMM you can say
+ *
+ *     llamafile_sgemm(params, m, n, k, A, lda, B, ldb, C, ldc,
+ *                     GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32);
+ *
+ * where `params->ith` is 0 and `params->nth` is 1.
+ *
+ * @param params is the compute parameters; `params->ith` is the thread id
+ *        (must be less than `params->nth`) and `params->nth` is the number
+ *        of threads (must be greater than zero)
+ * @param m is rows in `A` and `C`
+ * @param n is cols in `B` and `C`
+ * @param k is cols in `A` and rows in `B`
+ * @param A is first input matrix (always transposed)
+ * @param lda is row stride of `A`
+ * @param B is second input matrix (never transposed)
+ * @param ldb is row stride of `B`
+ * @param C is input/output array of output matrices
+ * @param ldc is row stride of `C`
+ * @param Atype is GGML data type of `A`
+ * @param Btype is GGML data type of `B`
+ * @param Ctype is GGML data type of `C`
+ * @return true if this function was able to service the matmul request
+ */
+bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t m, int64_t n, int64_t k,
+                     const void *A, int64_t lda, const void *B, int64_t ldb, void *C,
+                     int64_t ldc, int Atype, int Btype, int Ctype) {
+
+    assert(m >= 0);
+    assert(n >= 0);
+    assert(k >= 0);
+    assert(lda >= k);
+    assert(ldb >= k);
+    assert(ldc >= m);
+    assert(params->nth > 0);
+    assert(params->ith < params->nth);
+
+    // only enable sgemm for prompt processing
+#if !defined(__MMA__)
+    if (n < 2)
+        return false;
+#endif
+
+    if (Ctype != GGML_TYPE_F32)
+        return false;
+
+    switch (Atype) {
+
+    case GGML_TYPE_F32: {
+        if (Btype != GGML_TYPE_F32)
+            return false;
+#if defined(__AVX512F__)
+        tinyBLAS<16, __m512, __m512, float, float, float> tb{ params,
+            k, (const float *)A, lda,
+            (const float *)B, ldb,
+            (float *)C, ldc};
+        return tb.matmul(m, n);
+#elif defined(__AVX__) || defined(__AVX2__)
+        tinyBLAS<8, __m256, __m256, float, float, float> tb{ params,
+            k, (const float *)A, lda,
+            (const float *)B, ldb,
+            (float *)C, ldc};
+        return tb.matmul(m, n);
+#elif defined(__ARM_NEON)
+        if (n < 4)
+            return false;
+        tinyBLAS<4, float32x4_t, float32x4_t, float, float, float> tb{ params,
+            k, (const float *)A, lda,
+            (const float *)B, ldb,
+            (float *)C, ldc};
+        return tb.matmul(m, n);
+#elif defined(__VXE__) || defined(__VXE2__)
+        if (n < 4)
+            return false;
+        tinyBLAS<4, float32x4_t, float32x4_t, float, float, float> tb{ params,
+            k, (const float *)A, lda,
+            (const float *)B, ldb,
+            (float *)C, ldc};
+        return tb.matmul(m, n);
+#elif defined(__MMA__)
+        if (k % 8)
+            return false;
+        tinyBLAS_PPC<float, float, float> tb{
+            k, (const float *)A, lda,
+            (const float *)B, ldb,
+            (float *)C, ldc,
+            params->ith, params->nth};
+        tb.matmul(m, n);
+        return true;
+#else
+        return false;
+#endif
+    }
+
+    case GGML_TYPE_BF16: {
+#if defined(__AVX512BF16__)
+        if (Btype == GGML_TYPE_BF16) {
+            tinyBLAS<32, __m512, __m512bh, ggml_bf16_t, ggml_bf16_t, float> tb{ params, k,
+                (const ggml_bf16_t *)A, lda,
+                (const ggml_bf16_t *)B, ldb,
+                (float *)C, ldc};
+            return tb.matmul(m, n);
+        }
+#elif defined(__AVX512F__)
+        if (Btype == GGML_TYPE_BF16) {
+            tinyBLAS<16, __m512, __m512, ggml_bf16_t, ggml_bf16_t, float> tb{ params, k,
+                (const ggml_bf16_t *)A, lda,
+                (const ggml_bf16_t *)B, ldb,
+                (float *)C, ldc};
+            return tb.matmul(m, n);
+        }
+#elif defined(__AVX2__)
+        if (Btype == GGML_TYPE_BF16) {
+            tinyBLAS<8, __m256, __m256, ggml_bf16_t, ggml_bf16_t, float> tb{ params, k,
+                (const ggml_bf16_t *)A, lda,
+                (const ggml_bf16_t *)B, ldb,
+                (float *)C, ldc};
+            return tb.matmul(m, n);
+        }
+#elif defined(__MMA__)
+        if ((k % 8))
+            return false;
+        if (Btype == GGML_TYPE_BF16) {
+            tinyBLAS_BF16_PPC<ggml_bf16_t, ggml_bf16_t, float> tb{ k,
+                (const ggml_bf16_t *)A, lda,
+                (const ggml_bf16_t *)B, ldb,
+                (float *)C, ldc,
+                params->ith, params->nth};
+            tb.matmul(m, n);
+            return true;
+        }
+#endif
+        return false;
+    }
+
+    case GGML_TYPE_F16: {
+#if defined(__AVX512F__)
+        if (Btype == GGML_TYPE_F16) {
+            tinyBLAS<16, __m512, __m512, ggml_fp16_t, ggml_fp16_t, float> tb{ params, k,
+                (const ggml_fp16_t *)A, lda,
+                (const ggml_fp16_t *)B, ldb,
+                (float *)C, ldc};
+            return tb.matmul(m, n);
+        }
+#elif (defined(__AVX__) || defined(__AVX2__)) && defined(__F16C__)
+        if (Btype == GGML_TYPE_F16) {
+            tinyBLAS<8, __m256, __m256, ggml_fp16_t, ggml_fp16_t, float> tb{ params, k,
+                (const ggml_fp16_t *)A, lda,
+                (const ggml_fp16_t *)B, ldb,
+                (float *)C, ldc};
+            return tb.matmul(m, n);
+        }
+#elif defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER)
+        if (n < 8)
+            return false;
+        if (Btype == GGML_TYPE_F16) {
+            tinyBLAS<8, float16x8_t, float16x8_t, ggml_fp16_t, ggml_fp16_t, float> tb{ params,
+                k, (const ggml_fp16_t *)A, lda,
+                (const ggml_fp16_t *)B, ldb,
+                (float *)C, ldc};
+            return tb.matmul(m, n);
+        }
+#elif defined(__ARM_NEON) && !defined(_MSC_VER)
+        if (Btype == GGML_TYPE_F32) {
+            tinyBLAS<4, float32x4_t, float32x4_t, ggml_fp16_t, float, float> tb{ params,
+                k, (const ggml_fp16_t *)A, lda,
+                (const float *)B, ldb,
+                (float *)C, ldc};
+            return tb.matmul(m, n);
+        }
+#elif defined(__VXE__) || defined(__VXE2__)
+        if (n < 4)
+            return false;
+        if (Btype == GGML_TYPE_F16) {
+            tinyBLAS<4, float32x4_t, float32x4_t, ggml_fp16_t, ggml_fp16_t, float> tb{ params,
+                k, (const ggml_fp16_t *)A, lda,
+                (const ggml_fp16_t *)B, ldb,
+                (float *)C, ldc};
+            return tb.matmul(m, n);
+        }
+#endif
+        return false;
+    }
+
+    case GGML_TYPE_Q8_0: {
+        if (Btype != GGML_TYPE_Q8_0)
+            return false;
+#if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__)
+        tinyBLAS_Q0_AVX<block_q8_0, block_q8_0, float> tb{
+            k, (const block_q8_0 *)A, lda,
+            (const block_q8_0 *)B, ldb,
+            (float *)C, ldc,
+            params->ith, params->nth};
+        tb.matmul(m, n);
+        return true;
+#elif defined(__ARM_FEATURE_DOTPROD)
+        tinyBLAS_Q0_ARM<block_q8_0> tb{
+            k, (const block_q8_0 *)A, lda,
+            (const block_q8_0 *)B, ldb,
+            (float *)C, ldc,
+            params->ith, params->nth};
+        tb.matmul(m, n);
+        return true;
+#elif defined(__MMA__)
+        // TO-DO: Remove this condition once gemv forwarding is enabled.
+        if (n < 8 && n != 4)
+            return false;
+        if (m < 8 && m != 4)
+            return false;
+        tinyBLAS_Q0_PPC<block_q8_0, block_q8_0, float> tb{
+            k, (const block_q8_0 *)A, lda,
+            (const block_q8_0 *)B, ldb,
+            (float *)C, ldc,
+            params->ith, params->nth};
+        tb.matmul(m, n);
+        return true;
+#else
+        return false;
+#endif
+    }
+
+    case GGML_TYPE_Q4_0: {
+        if (Btype != GGML_TYPE_Q8_0)
+            return false;
+#if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__)
+        tinyBLAS_Q0_AVX<block_q4_0, block_q8_0, float> tb{
+            k, (const block_q4_0 *)A, lda,
+            (const block_q8_0 *)B, ldb,
+            (float *)C, ldc,
+            params->ith, params->nth};
+        tb.matmul(m, n);
+        return true;
+#elif defined(__ARM_FEATURE_DOTPROD)
+        tinyBLAS_Q0_ARM<block_q4_0> tb{
+            k, (const block_q4_0 *)A, lda,
+            (const block_q8_0 *)B, ldb,
+            (float *)C, ldc,
+            params->ith, params->nth};
+        tb.matmul(m, n);
+        return true;
+#elif defined(__MMA__)
+        // TO-DO: Remove this condition once gemv forwarding is enabled.
+        if (n < 8 && n != 4)
+            return false;
+        if (m < 8 && m != 4)
+            return false;
+        tinyBLAS_Q0_PPC<block_q4_0, block_q8_0, float> tb{
+            k, (const block_q4_0 *)A, lda,
+            (const block_q8_0 *)B, ldb,
+            (float *)C, ldc,
+            params->ith, params->nth};
+        tb.matmul(m, n);
+        return true;
+#else
+        return false;
+#endif
+    }
+
+    case GGML_TYPE_Q5_0: {
+        if (Btype != GGML_TYPE_Q8_0)
+            return false;
+#if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__)
+        tinyBLAS_Q0_AVX<block_q5_0, block_q8_0, float> tb{
+            k, (const block_q5_0 *)A, lda,
+            (const block_q8_0 *)B, ldb,
+            (float *)C, ldc,
+            params->ith, params->nth};
+        tb.matmul(m, n);
+        return true;
+#else
+        return false;
+#endif
+    }
+
+    case GGML_TYPE_IQ4_NL: {
+        if (Btype != GGML_TYPE_Q8_0)
+            return false;
+#if defined(__AVX2__) || defined(__AVX512F__) || defined(__AVX__)
+        tinyBLAS_Q0_AVX<block_iq4_nl, block_q8_0, float> tb{
+            k, (const block_iq4_nl *)A, lda,
+            (const block_q8_0 *)B, ldb,
+            (float *)C, ldc,
+            params->ith, params->nth};
+        tb.matmul(m, n);
+        return true;
+#else
+        return false;
+#endif
+    }
+
+    default:
+        return false;
+    }
+
+    (void)params;
+    (void)m;
+    (void)n;
+    (void)k;
+    (void)A;
+    (void)lda;
+    (void)B;
+    (void)ldb;
+    (void)C;
+    (void)ldc;
+    (void)Atype;
+    (void)Btype;
+    (void)Ctype;
+}
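For reference, a minimal call site for the entry point above, assuming ggml's headers and a single-threaded ggml_compute_params (the variable names and surrounding setup are hypothetical, not part of the package):

    // Single-threaded FP32 matmul through llamafile_sgemm; fall back to the
    // generic ggml path when no handwritten kernel services the request.
    struct ggml_compute_params params = {};
    params.ith = 0;   // this thread's id
    params.nth = 1;   // total number of threads
    bool ok = llamafile_sgemm(&params, m, n, k,
                              A, lda, B, ldb, C, ldc,
                              GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32);
    if (!ok) {
        // caller falls back to the general matmul routine
    }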